issue (dict) | pr (dict) | pr_details (dict)
{ "body": "To reproduce: \n- snapshot an index\n- modify mappings and settings of the index\n- close the index\n- restore the index\n\nObserved behavior:\n- the mappings and settings are not reverted back to the original (snapshotted) state\n\nExpected behavior:\n- the mappings and settings should match the state in the snapshot\n", "comments": [], "number": 5210, "title": "Restore of an existing index doesn’t restore mappings and settings" }
{ "body": "... exists\n\nCloses #5210\n", "number": 5211, "review_comments": [], "title": "Restore process should replace the mapping and settings if index already exists" }
{ "commits": [ { "message": "Restore process should replace the mapping and settings if index already exists\n\nCloses #5210" } ], "files": [ { "diff": "@@ -191,7 +191,8 @@ public ClusterState execute(ClusterState currentState) {\n \"] shard from snapshot with [\" + snapshotIndexMetaData.getNumberOfShards() + \"] shards\");\n }\n // Index exists and it's closed - open it in metadata and start recovery\n- IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(currentIndexMetaData).state(IndexMetaData.State.OPEN);\n+ IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);\n+ indexMdBuilder.version(Math.max(snapshotIndexMetaData.version(), currentIndexMetaData.version() + 1));\n IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndex).build();\n rtBuilder.addAsRestore(updatedIndexMetaData, restoreSource);\n blocks.removeIndexBlock(index, INDEX_CLOSED_BLOCK);", "filename": "src/main/java/org/elasticsearch/snapshots/RestoreService.java", "status": "modified" }, { "diff": "@@ -29,12 +29,15 @@\n import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;\n import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;\n import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;\n+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;\n import org.elasticsearch.action.count.CountResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.cluster.metadata.MappingMetaData;\n import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;\n import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.collect.ImmutableOpenMap;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.TimeValue;\n@@ -138,6 +141,54 @@ public void basicWorkFlowTest() throws Exception {\n assertThat(clusterState.getMetaData().hasIndex(\"test-idx-2\"), equalTo(false));\n }\n \n+ @Test\n+ public void restoreWithDifferentMappingsAndSettingsTest() throws Exception {\n+ Client client = client();\n+\n+ logger.info(\"--> creating repository\");\n+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository(\"test-repo\")\n+ .setType(\"fs\").setSettings(ImmutableSettings.settingsBuilder()\n+ .put(\"location\", newTempDir(LifecycleScope.SUITE))\n+ .put(\"compress\", randomBoolean())\n+ .put(\"chunk_size\", randomIntBetween(100, 1000))\n+ ).get();\n+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));\n+\n+ logger.info(\"--> create index with foo type\");\n+ assertAcked(prepareCreate(\"test-idx\", 2, ImmutableSettings.builder().put(\"refresh_interval\", 10)));\n+\n+ assertAcked(client().admin().indices().preparePutMapping(\"test-idx\").setType(\"foo\").setSource(\"baz\", \"type=string\"));\n+ ensureGreen();\n+\n+ logger.info(\"--> snapshot it\");\n+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(\"test-repo\", \"test-snap\").setWaitForCompletion(true).setIndices(\"test-idx\").get();\n+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));\n+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));\n+\n+ 
logger.info(\"--> delete the index and recreate it with bar type\");\n+ wipeIndices(\"test-idx\");\n+ assertAcked(prepareCreate(\"test-idx\", 2, ImmutableSettings.builder().put(\"refresh_interval\", 5)));\n+ assertAcked(client().admin().indices().preparePutMapping(\"test-idx\").setType(\"bar\").setSource(\"baz\", \"type=string\"));\n+ ensureGreen();\n+\n+ logger.info(\"--> close index\");\n+ client.admin().indices().prepareClose(\"test-idx\").get();\n+\n+ logger.info(\"--> restore all indices from the snapshot\");\n+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(\"test-repo\", \"test-snap\").setWaitForCompletion(true).execute().actionGet();\n+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));\n+ ensureGreen();\n+\n+ logger.info(\"--> assert that old mapping is restored\");\n+ ImmutableOpenMap<String, MappingMetaData> mappings = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get(\"test-idx\").getMappings();\n+ assertThat(mappings.get(\"foo\"), notNullValue());\n+ assertThat(mappings.get(\"bar\"), nullValue());\n+\n+ logger.info(\"--> assert that old settings are restored\");\n+ GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings(\"test-idx\").execute().actionGet();\n+ assertThat(getSettingsResponse.getSetting(\"test-idx\", \"index.refresh_interval\"), equalTo(\"10\"));\n+ }\n+\n @Test\n public void emptySnapshotTest() throws Exception {\n Client client = client();", "filename": "src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java", "status": "modified" } ] }
{ "body": "say our data structure is\n\n```\ninvoice:{\n vendor:{\n id:10\n name: \"google\"\n }\n}\n```\n\nvendor object is optional to the invoice and may be absent\n\nwhen calculating missing aggregation on vendor.id it fails with \n\n```\nearchPhaseExecutionException[Failed to execute phase [query_fetch], all shards failed; shardFailures {[mr2aF25CTPGrkvzftHq9Rg][award][0]: ClassCastException[org.elasticsearch.search.aggregations.support.FieldDataSource$Bytes$FieldData cannot be cast to org.elasticsearch.search.aggregations.support.FieldDataSource$Numeric]}]\n```\n\nI would expect missing aggregation to treat missing parent objects of the field the aggregation is calculated for as if the field itself is missing or it would be virtually impossible to guarantee that such aggregation would finish successfully over deeply nested graphs where any part of the path to the missing field may be absent\n", "comments": [ { "body": "Indeed the missing aggregation should work on fields that are not mapped and count every hit as a document that misses the field.\n\nI just tried to reproduce this issue without luck, could you please try to either provide us with a curl recreation or give the stacktrace of the `ClassCastException` in the logs? Thanks.\n", "created_at": "2014-02-20T08:42:31Z" }, { "body": "@jpountz No logs just what I get in the response (maybe I need to switch in debug mode?)\n\nhere is the recreation. it does not fail if missing is the only aggregation or the sibling bucket aggs is on some other field only when you do say terms on a field and next to it missing on the same field\nso the issue may be slightly different that I thought initially\n\nhttps://gist.github.com/roytmana/9114933\n", "created_at": "2014-02-20T14:34:31Z" }, { "body": "hi @roytmana, this is indeed a bug, will be working on fixing it. Thx for reporting!\n", "created_at": "2014-02-20T16:14:59Z" } ], "number": 5190, "title": "\"Missing\" aggregation fails when object containing aggregation field is missing as well" }
{ "body": "... + required Value Source type as a combi key (used to be only field name). This fixes a problem where multiple aggregations where defined on the same field, yet require different types of value sources.\n\nCloses #5190\n", "number": 5205, "review_comments": [], "title": "Changed the caching of FieldDataSource in aggs to be based on field name..." }
{ "commits": [ { "message": "Changed the caching of FieldDataSource in aggs to be based on field name + required Value Source type as a combi key (used to be only field name). This fixes a problem where multiple aggregations where defined on the same field, yet require different types of value sources.\n\nCloses #5190" } ], "files": [ { "diff": "@@ -48,7 +48,7 @@ public class AggregationContext implements ReaderContextAware, ScorerAware {\n \n private final SearchContext searchContext;\n \n- private ObjectObjectOpenHashMap<String, FieldDataSource>[] perDepthFieldDataSources = new ObjectObjectOpenHashMap[4];\n+ private ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource>[] perDepthFieldDataSources = new ObjectObjectOpenHashMap[4];\n private List<ReaderContextAware> readerAwares = new ArrayList<ReaderContextAware>();\n private List<ScorerAware> scorerAwares = new ArrayList<ScorerAware>();\n \n@@ -102,9 +102,9 @@ public <VS extends ValuesSource> VS valuesSource(ValuesSourceConfig<VS> config,\n perDepthFieldDataSources = Arrays.copyOf(perDepthFieldDataSources, ArrayUtil.oversize(1 + depth, RamUsageEstimator.NUM_BYTES_OBJECT_REF));\n }\n if (perDepthFieldDataSources[depth] == null) {\n- perDepthFieldDataSources[depth] = new ObjectObjectOpenHashMap<String, FieldDataSource>();\n+ perDepthFieldDataSources[depth] = new ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource>();\n }\n- final ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources = perDepthFieldDataSources[depth];\n+ final ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource> fieldDataSources = perDepthFieldDataSources[depth];\n \n if (config.fieldContext == null) {\n if (NumericValuesSource.class.isAssignableFrom(config.valueSourceType)) {\n@@ -139,14 +139,15 @@ private NumericValuesSource numericScript(ValuesSourceConfig<?> config) {\n return new NumericValuesSource(source, config.formatter(), config.parser());\n }\n \n- private NumericValuesSource numericField(ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {\n- FieldDataSource.Numeric dataSource = (FieldDataSource.Numeric) fieldDataSources.get(config.fieldContext.field());\n+ private NumericValuesSource numericField(ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {\n+ final ConfigCacheKey cacheKey = new ConfigCacheKey(config);\n+ FieldDataSource.Numeric dataSource = (FieldDataSource.Numeric) fieldDataSources.get(cacheKey);\n if (dataSource == null) {\n FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);\n dataSource = new FieldDataSource.Numeric.FieldData((IndexNumericFieldData<?>) config.fieldContext.indexFieldData(), metaData);\n setReaderIfNeeded((ReaderContextAware) dataSource);\n readerAwares.add((ReaderContextAware) dataSource);\n- fieldDataSources.put(config.fieldContext.field(), dataSource);\n+ fieldDataSources.put(cacheKey, dataSource);\n }\n if (config.script != null) {\n setScorerIfNeeded(config.script);\n@@ -166,8 +167,9 @@ private NumericValuesSource numericField(ObjectObjectOpenHashMap<String, FieldDa\n return new NumericValuesSource(dataSource, config.formatter(), config.parser());\n }\n \n- private ValuesSource bytesField(ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {\n- FieldDataSource dataSource = fieldDataSources.get(config.fieldContext.field());\n+ private ValuesSource bytesField(ObjectObjectOpenHashMap<ConfigCacheKey, 
FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {\n+ final ConfigCacheKey cacheKey = new ConfigCacheKey(config);\n+ FieldDataSource dataSource = fieldDataSources.get(cacheKey);\n if (dataSource == null) {\n final IndexFieldData<?> indexFieldData = config.fieldContext.indexFieldData();\n FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);\n@@ -178,7 +180,7 @@ private ValuesSource bytesField(ObjectObjectOpenHashMap<String, FieldDataSource>\n }\n setReaderIfNeeded((ReaderContextAware) dataSource);\n readerAwares.add((ReaderContextAware) dataSource);\n- fieldDataSources.put(config.fieldContext.field(), dataSource);\n+ fieldDataSources.put(cacheKey, dataSource);\n }\n if (config.script != null) {\n setScorerIfNeeded(config.script);\n@@ -218,14 +220,15 @@ private BytesValuesSource bytesScript(ValuesSourceConfig<?> config) {\n return new BytesValuesSource(source);\n }\n \n- private GeoPointValuesSource geoPointField(ObjectObjectOpenHashMap<String, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {\n- FieldDataSource.GeoPoint dataSource = (FieldDataSource.GeoPoint) fieldDataSources.get(config.fieldContext.field());\n+ private GeoPointValuesSource geoPointField(ObjectObjectOpenHashMap<ConfigCacheKey, FieldDataSource> fieldDataSources, ValuesSourceConfig<?> config) {\n+ final ConfigCacheKey cacheKey = new ConfigCacheKey(config);\n+ FieldDataSource.GeoPoint dataSource = (FieldDataSource.GeoPoint) fieldDataSources.get(cacheKey);\n if (dataSource == null) {\n FieldDataSource.MetaData metaData = FieldDataSource.MetaData.load(config.fieldContext.indexFieldData(), searchContext);\n dataSource = new FieldDataSource.GeoPoint((IndexGeoPointFieldData<?>) config.fieldContext.indexFieldData(), metaData);\n setReaderIfNeeded(dataSource);\n readerAwares.add(dataSource);\n- fieldDataSources.put(config.fieldContext.field(), dataSource);\n+ fieldDataSources.put(cacheKey, dataSource);\n }\n if (config.needsHashes) {\n dataSource.setNeedsHashes(true);\n@@ -254,4 +257,35 @@ private void setScorerIfNeeded(ScorerAware scorerAware) {\n scorerAware.setScorer(scorer);\n }\n }\n+\n+ private static class ConfigCacheKey {\n+\n+ private final String field;\n+ private final Class<? extends ValuesSource> valueSourceType;\n+\n+ private ConfigCacheKey(ValuesSourceConfig config) {\n+ this.field = config.fieldContext.field();\n+ this.valueSourceType = config.valueSourceType;\n+ }\n+\n+ @Override\n+ public boolean equals(Object o) {\n+ if (this == o) return true;\n+ if (o == null || getClass() != o.getClass()) return false;\n+\n+ ConfigCacheKey that = (ConfigCacheKey) o;\n+\n+ if (!field.equals(that.field)) return false;\n+ if (!valueSourceType.equals(that.valueSourceType)) return false;\n+\n+ return true;\n+ }\n+\n+ @Override\n+ public int hashCode() {\n+ int result = field.hashCode();\n+ result = 31 * result + valueSourceType.hashCode();\n+ return result;\n+ }\n+ }\n }", "filename": "src/main/java/org/elasticsearch/search/aggregations/support/AggregationContext.java", "status": "modified" }, { "diff": "@@ -0,0 +1,113 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. 
Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.search.aggregations;\n+\n+import com.carrotsearch.hppc.IntIntMap;\n+import com.carrotsearch.hppc.IntIntOpenHashMap;\n+import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.action.search.SearchResponse;\n+import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.search.aggregations.bucket.missing.Missing;\n+import org.elasticsearch.search.aggregations.bucket.terms.Terms;\n+import org.elasticsearch.test.ElasticsearchIntegrationTest;\n+import org.junit.Test;\n+\n+import java.util.Collection;\n+\n+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.search.aggregations.AggregationBuilders.missing;\n+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;\n+import static org.hamcrest.CoreMatchers.equalTo;\n+\n+/**\n+ *\n+ */\n+public class CombiTests extends ElasticsearchIntegrationTest {\n+\n+\n+ @Override\n+ public Settings indexSettings() {\n+ return ImmutableSettings.builder()\n+ .put(\"index.number_of_shards\", between(1, 5))\n+ .put(\"index.number_of_replicas\", between(0, 1))\n+ .build();\n+ }\n+\n+ /**\n+ * Making sure that if there are multiple aggregations, working on the same field, yet require different\n+ * value source type, they can all still work. It used to fail as we used to cache the ValueSource by the\n+ * field name. If the cached value source was of type \"bytes\" and another aggregation on the field required to see\n+ * it as \"numeric\", it didn't work. 
Now we cache the Value Sources by a custom key (field name + ValueSource type)\n+ * so there's no conflict there.\n+ */\n+ @Test\n+ public void multipleAggs_OnSameField_WithDifferentRequiredValueSourceType() throws Exception {\n+\n+ createIndex(\"idx\");\n+ IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];\n+ IntIntMap values = new IntIntOpenHashMap();\n+ long missingValues = 0;\n+ for (int i = 0; i < builders.length; i++) {\n+ String name = \"name_\" + randomIntBetween(1, 10);\n+ if (rarely()) {\n+ missingValues++;\n+ builders[i] = client().prepareIndex(\"idx\", \"type\").setSource(jsonBuilder()\n+ .startObject()\n+ .field(\"name\", name)\n+ .endObject());\n+ } else {\n+ int value = randomIntBetween(1, 10);\n+ values.put(value, values.getOrDefault(value, 0) + 1);\n+ builders[i] = client().prepareIndex(\"idx\", \"type\").setSource(jsonBuilder()\n+ .startObject()\n+ .field(\"name\", name)\n+ .field(\"value\", value)\n+ .endObject());\n+ }\n+ }\n+ indexRandom(true, builders);\n+ ensureSearchable();\n+\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .addAggregation(missing(\"missing_values\").field(\"value\"))\n+ .addAggregation(terms(\"values\").field(\"value\"))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Aggregations aggs = response.getAggregations();\n+\n+ Missing missing = aggs.get(\"missing_values\");\n+ assertNotNull(missing);\n+ assertThat(missing.getDocCount(), equalTo(missingValues));\n+\n+ Terms terms = aggs.get(\"values\");\n+ assertNotNull(terms);\n+ Collection<Terms.Bucket> buckets = terms.getBuckets();\n+ assertThat(buckets.size(), equalTo(values.size()));\n+ for (Terms.Bucket bucket : buckets) {\n+ values.remove(bucket.getKeyAsNumber().intValue());\n+ }\n+ assertTrue(values.isEmpty());\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/search/aggregations/CombiTests.java", "status": "added" }, { "diff": "@@ -42,7 +42,10 @@\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.core.IsNull.notNullValue;\n \n-/** Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things like the growth of dynamic arrays is tested. */\n+/**\n+ * Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things like\n+ * the growth of dynamic arrays is tested.\n+ */\n public class RandomTests extends ElasticsearchIntegrationTest {\n \n @Override", "filename": "src/test/java/org/elasticsearch/search/aggregations/RandomTests.java", "status": "modified" }, { "diff": "@@ -115,7 +115,7 @@ public void sizeIsZero() {\n .size(0))\n .execute().actionGet();\n \n- assertSearchResponse(response);System.out.println(response);\n+ assertSearchResponse(response);\n \n Terms terms = response.getAggregations().get(\"terms\");\n assertThat(terms, notNullValue());", "filename": "src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java", "status": "modified" } ] }
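The new CombiTests case above captures the failing scenario from #5190: two sibling aggregations on one field that require different value source types. A hedged usage sketch of that request with the 1.x Java client API, mirroring the test:

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

import static org.elasticsearch.search.aggregations.AggregationBuilders.missing;
import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

class CombiAggExample {
    // Before the patch this combination could fail with a ClassCastException, because the
    // FieldDataSource cached for one aggregation's value source type was reused for the
    // other; the composite (field, value source type) cache key keeps them separate.
    static SearchResponse run(Client client) {
        return client.prepareSearch("idx")
                .addAggregation(missing("missing_values").field("value"))
                .addAggregation(terms("values").field("value"))
                .get();
    }
}
```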
{ "body": "", "comments": [], "number": 5185, "title": "XContentBuilder.yamlBuilder() incorrectly returns a SMILE builder" }
{ "body": "Closes #5185\n", "number": 5186, "review_comments": [], "title": "Fix yamlBuilder() to return YAML builder instead of SMILE" }
{ "commits": [ { "message": "Fix yamlBuilder() to return YAML builder instead of SMILE\n\nCloses #5185" } ], "files": [ { "diff": "@@ -72,7 +72,7 @@ public static XContentBuilder smileBuilder(OutputStream os) throws IOException {\n * Returns a content builder using YAML format ({@link org.elasticsearch.common.xcontent.XContentType#YAML}.\n */\n public static XContentBuilder yamlBuilder() throws IOException {\n- return contentBuilder(XContentType.SMILE);\n+ return contentBuilder(XContentType.YAML);\n }\n \n /**", "filename": "src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java", "status": "modified" } ] }
{ "body": "We have noticed that sometime all shards do not respond to a DFS query then fetch and we get only 4/5 shards responding (and in debugging noticed the same for query then fetch). Turning the logs to debug we see the following message when only 4/5 shards respond.\n\n```\n2014-02-18 00:01:19,574 DEBUG (elasticsearch[dev][search][T#2]) log4j.Log4jESLogger<109>: [dev] [17] Failed to execute query phase\norg.elasticsearch.search.SearchContextMissingException: No search context found for id [17] \n at org.elasticsearch.search.SearchService.findContext(SearchService.java:455) \n at org.elasticsearch.search.SearchService.executeQueryPhase(SearchService.java:279) \n at org.elasticsearch.search.action.SearchServiceTransportAction.sendExecuteQuery(SearchServiceTransportAction.java:236) \n at org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction$AsyncAction.executeQuery(TransportSearchDfsQueryThenFetchAction.java:148)\n at org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction$AsyncAction$2.run(TransportSearchDfsQueryThenFetchAction.java:132) \n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) \n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744) \n```\n\nLooking into what causes this, I was able to reproduce the issue more quickly by setting the SearchService reaper thread to run almost continuously by explicitly setting \"search.keep_alive_interval\" to a low value in the milliseconds range vs every minute. (we do see the same behavior without modifying this value, but \n\nI saw two issues occur with some extra debugging. The first is that when SearchContext is created, the default value of lastAccessedTime is 0 and if the reaper runs against that context quickly enough, the context will be freed before it is used. 
\n\n```\n2014-02-18 15:32:53,394 DEBUG (elasticsearch[dev][scheduler][T#1]) log4j.Log4jESLogger<104>: [dev] freeing search context 1390 time: 1392737573376 lastAccessTime: 0 keepAlive: 300000\n2014-02-18 15:32:53,399 DEBUG (elasticsearch[dev][search][T#3]) log4j.Log4jESLogger<109>: [dev] [1390] Failed to execute query phase\norg.elasticsearch.search.SearchContextMissingException: No search context found for id [1390]\n at org.elasticsearch.search.SearchService.findContext(SearchService.java:455)\n at org.elasticsearch.search.SearchService.executeQueryPhase(SearchService.java:279)\n at org.elasticsearch.search.action.SearchServiceTransportAction.sendExecuteQuery(SearchServiceTransportAction.java:236)\n at org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction$AsyncAction.executeQuery(TransportSearchDfsQueryThenFetchAction.java:148)\n at org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction$AsyncAction$2.run(TransportSearchDfsQueryThenFetchAction.java:132)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n```\n\nThe second the reaper calls context.lastAccessTime() multiple times, but the value can change after the first if statement and an incorrect value will be used in the next statement (such as -1 when the context is being used).\n\n```\n2014-02-18 15:24:38,721 DEBUG (elasticsearch[dev][scheduler][T#1]) log4j.Log4jESLogger<104>: [dev] freeing search context 1691 time: 1392737078619 lastAccessTime: -1 keepAlive: 300000\n2014-02-18 15:24:38,725 DEBUG (elasticsearch[dev][search][T#4]) log4j.Log4jESLogger<109>: [dev] [1691] Failed to execute query phase\norg.elasticsearch.search.SearchContextMissingException: No search context found for id [1691]\n at org.elasticsearch.search.SearchService.findContext(SearchService.java:455)\n at org.elasticsearch.search.SearchService.executeQueryPhase(SearchService.java:279)\n at org.elasticsearch.search.action.SearchServiceTransportAction.sendExecuteQuery(SearchServiceTransportAction.java:236)\n at org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction$AsyncAction.executeQuery(TransportSearchDfsQueryThenFetchAction.java:148)\n at org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction$AsyncAction$2.run(TransportSearchDfsQueryThenFetchAction.java:132)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n```\n\nThis [gist](https://gist.github.com/jaymode/9074150) contains code that I have used to resolve the issues. If this needs to be submitted as a pull request, I can do that as well.\n", "comments": [ { "body": "I looked at the gist and I think you should submit this as a PR! Good catch! Can you make sure you sign the CLA as well so we can pull this in quickly. \n\nOne comment about the initialization I think we should initialize the new context with `-1` instead so we just skip it on the reaper?\n", "created_at": "2014-02-18T17:01:13Z" }, { "body": "Will create a PR and sign the CLA shortly.\n\nRegarding initialization, I thought about using `-1` but decided against it as there could be some unknown case, where lastAccessTime is always `-1`, so the search context will never be cleaned up by the reaper. 
Initializing to the estimated time allows it to be cleaned up by the reaper in that case. Let me know what you think.\n", "created_at": "2014-02-18T18:36:24Z" }, { "body": "@jaymode I agree on the `-1` and the special casing. We actually set it to `-1` each time we access the context. to prevent the reaper from closing the context while we are using it. I think we should do the same when initialising. There should be some logic that frees the context if there is an exception thrown too.\n", "created_at": "2014-02-18T18:48:10Z" } ], "number": 5165, "title": "SearchContext is occasionally closed prematurely" }
{ "body": "PR for #5165\n", "number": 5170, "review_comments": [ { "body": "given this I think we should add some checks to the place where this is created in `SearchService`\nie. this:\n\n``` Java\n SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException {\n SearchContext context = createContext(request);\n activeContexts.put(context.id(), context);\n context.indexShard().searchService().onNewContext(context);\n return context;\n }\n```\n\nshould look like:\n\n``` Java\n SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException {\n SearchContext context = createContext(request);\n boolean success = false;\n try {\n activeContexts.put(context.id(), context);\n context.indexShard().searchService().onNewContext(context);\n success = true;\n return context;\n } finally {\n if (!success) { \n freeContext(context);\n }\n }\n }\n```\n", "created_at": "2014-02-18T20:41:54Z" } ], "title": "Fix SearchContext occasionally closed prematurely " }
{ "commits": [ { "message": "Fix SearchContext from being closed prematurely\n\nFixes SearchContext from being closed during initialization or immediately\nafter processing is started\n\nCloses #5165" } ], "files": [ { "diff": "@@ -465,9 +465,17 @@ private SearchContext findContext(long id) throws SearchContextMissingException\n \n SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException {\n SearchContext context = createContext(request);\n- activeContexts.put(context.id(), context);\n- context.indexShard().searchService().onNewContext(context);\n- return context;\n+ boolean success = false;\n+ try {\n+ activeContexts.put(context.id(), context);\n+ context.indexShard().searchService().onNewContext(context);\n+ success = true;\n+ return context;\n+ } finally {\n+ if (!success) {\n+ freeContext(context);\n+ }\n+ }\n }\n \n SearchContext createContext(ShardSearchRequest request) throws ElasticsearchException {\n@@ -838,10 +846,14 @@ class Reaper implements Runnable {\n public void run() {\n long time = threadPool.estimatedTimeInMillis();\n for (SearchContext context : activeContexts.values()) {\n- if (context.lastAccessTime() == -1) { // its being processed or timeout is disabled\n+ // Use the same value for both checks since lastAccessTime can\n+ // be modified by another thread between checks!\n+ long lastAccessTime = context.lastAccessTime();\n+ if (lastAccessTime == -1l) { // its being processed or timeout is disabled\n continue;\n }\n- if ((time - context.lastAccessTime() > context.keepAlive())) {\n+ if ((time - lastAccessTime > context.keepAlive())) {\n+ logger.debug(\"freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]\", context.id(), time, lastAccessTime, context.keepAlive());\n freeContext(context);\n }\n }", "filename": "src/main/java/org/elasticsearch/search/SearchService.java", "status": "modified" }, { "diff": "@@ -169,7 +169,7 @@ public class DefaultSearchContext extends SearchContext {\n \n private volatile long keepAlive;\n \n- private volatile long lastAccessTime;\n+ private volatile long lastAccessTime = -1;\n \n private List<Releasable> clearables = null;\n ", "filename": "src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java", "status": "modified" } ] }
{ "body": "DEBIAN/postinst:37\n chmod 644 /etc/elasticsearch/*\n\nthis is only a good idea as long as there are no subdirectories in /etc/elasticsearch/ because after updating the elasticsearch package files in /etc/elasticsearch/synonyms (for example) can't be read anymore.\n", "comments": [], "number": 3820, "title": "Debian Package sets /etc/elasticsearch/* to 0644" }
{ "body": "The old post installation script on debian set all data to\n644 inside of /etc/elasticsearch, which does not work, when\nthere are subdirectories\n\nCloses #3820\n", "number": 5158, "review_comments": [], "title": "Set permission in debian postinst script correctly" }
{ "commits": [ { "message": "Set permission in debian postinst script correctly\n\nThe old post installation script on debian set all data to\n644 inside of /etc/elasticsearch, which does not work, when\nthere are subdirectories\n\nCloses #3820" } ], "files": [ { "diff": "@@ -34,7 +34,8 @@ case \"$1\" in\n \t# configuration files should not be modifiable by elasticsearch user, as this can be a security issue\n \tchown -Rh root:root /etc/elasticsearch/*\n \tchmod 755 /etc/elasticsearch\n-\tchmod 644 /etc/elasticsearch/*\n+\tfind /etc/elasticsearch -type f -exec chmod 644 {} ';'\n+\tfind /etc/elasticsearch -type d -exec chmod 755 {} ';'\n \t\n \t# if $2 is set, this is an upgrade\n \tif ( [ -n $2 ] && [ \"$RESTART_ON_UPGRADE\" = \"true\" ] ) ; then", "filename": "src/deb/control/postinst", "status": "modified" } ] }
{ "body": "At least in 1.0.0\n\n``` shell\n# create index and object\ncurl -X PUT 'http://127.0.0.1:9200/wtf/?pretty'\ncurl -X PUT 'http://127.0.0.1:9200/wtf/test/1?pretty' -d '{\"complex\": { \"object\": \"it is\" }, \"simple\": \"value\"}'\n\n# no results\ncurl -X POST 'http://127.0.0.1:9200/wtf/test/_search?pretty&q=_exists_:complex'\n# has results\ncurl -X POST 'http://127.0.0.1:9200/wtf/test/_search?pretty&q=_exists_:simple'\n\n# has results\ncurl -X POST 'http://127.0.0.1:9200/wtf/test/_search?pretty' -d '{ \"query\": { \"filtered\": { \"filter\": { \"not\": { \"missing\": { \"field\": \"complex\" } } } } } }'\n\n# has results\ncurl -X POST 'http://127.0.0.1:9200/wtf/test/_search?pretty' -d '{ \"query\": { \"filtered\": { \"filter\": { \"not\": { \"missing\": { \"field\": \"simple\" } } } } } }'\n```\n\nThis behaviour should be either fixed or noted in docs.\n", "comments": [ { "body": "The `exists` filter works by looking at matches in the inverted index for any value of the given field. The reason why it doesn't work on object is that they are not indexed. In the example that you gave, the inverted index only has entries for `complex.object` and `simple`.\n\nI will a a note to the documentation.\n", "created_at": "2014-02-17T12:58:19Z" }, { "body": "So `missing` filter will use field data and not inverted index, right?\n", "created_at": "2014-02-17T13:02:51Z" }, { "body": "The `missing` filter works exactly as if you wrapped the `exists` filter under a `not` filter, so the same limitations apply.\n", "created_at": "2014-02-17T13:09:24Z" }, { "body": "Since this works:\n\n```\ncurl -X POST 'http://127.0.0.1:9200/wtf/test/_search?pretty' -d '{ \"query\": { \"filtered\": { \"filter\": { \"exists\": { \"field\": \"complex\" } } } } }'\n```\n\nand this doesn't:\n\n```\ncurl -X POST 'http://127.0.0.1:9200/wtf/test/_search?pretty&q=_exists_:complex'\n```\n\nI understand that `_exists_` in query only uses inverted index, but `exists` filter knows that `complex` is object and cannot be found in index so field data should be used. Do I get this right?\n", "created_at": "2014-02-17T13:34:28Z" }, { "body": "I had to check the code to understand what happens, the difference is that `exists` tries to be smarter than `_exists_` when matching an object by using sub-fields. That is, `exists` is able to translate `\"exists\": { \"field\": \"complex\"}` to `\"exists\": { \"field\": \"complex.object\"}`. Let me see if I can fix `_exists_` to behave consistently with `exists`.\n", "created_at": "2014-02-17T14:28:58Z" } ], "number": 5142, "title": "_exists_ doesn't work on objects" }
{ "body": "`_exists_` and `_missing_` miss field name expansion that `exists` and\n`missing` have, which allows these filters to work on `object` fields.\n\nClose #5142\n", "number": 5145, "review_comments": [], "title": "Make _exists_/_missing_ behave consistently with exists/missing." }
{ "commits": [ { "message": "Make _exists_/_missing_ behave consistently with exists/missing.\n\n`_exists_` and `_missing_` miss field name expansion that `exists` and\n`missing` have, which allows these filters to work on `object` fields.\n\nClose #5142" } ], "files": [ { "diff": "@@ -19,15 +19,11 @@\n \n package org.apache.lucene.queryparser.classic;\n \n-import org.apache.lucene.search.Filter;\n import org.apache.lucene.search.Query;\n-import org.apache.lucene.search.TermRangeFilter;\n import org.elasticsearch.common.lucene.search.XConstantScoreQuery;\n-import org.elasticsearch.index.mapper.MapperService;\n+import org.elasticsearch.index.query.ExistsFilterParser;\n import org.elasticsearch.index.query.QueryParseContext;\n \n-import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;\n-\n /**\n *\n */\n@@ -37,23 +33,6 @@ public class ExistsFieldQueryExtension implements FieldQueryExtension {\n \n @Override\n public Query query(QueryParseContext parseContext, String queryText) {\n- String fieldName = queryText;\n- Filter filter = null;\n- MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);\n- if (smartNameFieldMappers != null) {\n- if (smartNameFieldMappers.hasMapper()) {\n- filter = smartNameFieldMappers.mapper().rangeFilter(null, null, true, true, parseContext);\n- }\n- }\n- if (filter == null) {\n- filter = new TermRangeFilter(fieldName, null, null, true, true);\n- }\n-\n- // we always cache this one, really does not change...\n- filter = parseContext.cacheFilter(filter, null);\n-\n- filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);\n-\n- return new XConstantScoreQuery(filter);\n+ return new XConstantScoreQuery(ExistsFilterParser.newFilter(parseContext, queryText, null));\n }\n }", "filename": "src/main/java/org/apache/lucene/queryparser/classic/ExistsFieldQueryExtension.java", "status": "modified" }, { "diff": "@@ -19,16 +19,11 @@\n \n package org.apache.lucene.queryparser.classic;\n \n-import org.apache.lucene.search.Filter;\n import org.apache.lucene.search.Query;\n-import org.apache.lucene.search.TermRangeFilter;\n-import org.elasticsearch.common.lucene.search.NotFilter;\n import org.elasticsearch.common.lucene.search.XConstantScoreQuery;\n-import org.elasticsearch.index.mapper.MapperService;\n+import org.elasticsearch.index.query.MissingFilterParser;\n import org.elasticsearch.index.query.QueryParseContext;\n \n-import static org.elasticsearch.index.query.support.QueryParsers.wrapSmartNameFilter;\n-\n /**\n *\n */\n@@ -38,27 +33,7 @@ public class MissingFieldQueryExtension implements FieldQueryExtension {\n \n @Override\n public Query query(QueryParseContext parseContext, String queryText) {\n- String fieldName = queryText;\n-\n- Filter filter = null;\n- MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);\n- if (smartNameFieldMappers != null) {\n- if (smartNameFieldMappers.hasMapper()) {\n- filter = smartNameFieldMappers.mapper().rangeFilter(null, null, true, true, parseContext);\n- }\n- }\n- if (filter == null) {\n- filter = new TermRangeFilter(fieldName, null, null, true, true);\n- }\n-\n- // we always cache this one, really does not change... 
(exists)\n- filter = parseContext.cacheFilter(filter, null);\n- filter = new NotFilter(filter);\n- // cache the not filter as well, so it will be faster\n- filter = parseContext.cacheFilter(filter, null);\n-\n- filter = wrapSmartNameFilter(filter, smartNameFieldMappers, parseContext);\n-\n- return new XConstantScoreQuery(filter);\n+ return new XConstantScoreQuery(MissingFilterParser.newFilter(parseContext, queryText,\n+ MissingFilterParser.DEFAULT_EXISTENCE_VALUE, MissingFilterParser.DEFAULT_NULL_VALUE, null));\n }\n }", "filename": "src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java", "status": "modified" }, { "diff": "@@ -77,6 +77,10 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n throw new QueryParsingException(parseContext.index(), \"exists must be provided with a [field]\");\n }\n \n+ return newFilter(parseContext, fieldPattern, filterName);\n+ }\n+\n+ public static Filter newFilter(QueryParseContext parseContext, String fieldPattern, String filterName) {\n MapperService.SmartNameObjectMapper smartNameObjectMapper = parseContext.smartObjectMapper(fieldPattern);\n if (smartNameObjectMapper != null && smartNameObjectMapper.hasMapper()) {\n // automatic make the object mapper pattern\n@@ -116,4 +120,5 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n }\n return filter;\n }\n+\n }", "filename": "src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java", "status": "modified" }, { "diff": "@@ -41,6 +41,8 @@\n public class MissingFilterParser implements FilterParser {\n \n public static final String NAME = \"missing\";\n+ public static final boolean DEFAULT_NULL_VALUE = false;\n+ public static final boolean DEFAULT_EXISTENCE_VALUE = true;\n \n @Inject\n public MissingFilterParser() {\n@@ -57,8 +59,8 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n \n String fieldPattern = null;\n String filterName = null;\n- boolean nullValue = false;\n- boolean existence = true;\n+ boolean nullValue = DEFAULT_NULL_VALUE;\n+ boolean existence = DEFAULT_EXISTENCE_VALUE;\n \n XContentParser.Token token;\n String currentFieldName = null;\n@@ -84,6 +86,10 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n throw new QueryParsingException(parseContext.index(), \"missing must be provided with a [field]\");\n }\n \n+ return newFilter(parseContext, fieldPattern, existence, nullValue, filterName);\n+ }\n+\n+ public static Filter newFilter(QueryParseContext parseContext, String fieldPattern, boolean existence, boolean nullValue, String filterName) {\n if (!existence && !nullValue) {\n throw new QueryParsingException(parseContext.index(), \"missing must have either existence, or null_value, or both set to true\");\n }", "filename": "src/main/java/org/elasticsearch/index/query/MissingFilterParser.java", "status": "modified" } ] }
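With the refactoring above, the `_exists_` and `_missing_` query-string extensions delegate to the same static newFilter() logic as the exists and missing filter parsers, so object fields are expanded to their sub-fields in both code paths. A hedged client-side sketch (1.x Java API) of the two now-equivalent ways to express the query from the issue's recreation:

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.FilterBuilders;
import org.elasticsearch.index.query.QueryBuilders;

class ExistsOnObjectExample {
    static void run(Client client) {
        // Query-string form: previously returned no hits for object fields like "complex"
        SearchResponse viaQueryString = client.prepareSearch("wtf")
                .setQuery(QueryBuilders.queryString("_exists_:complex"))
                .get();
        // Explicit filter form: already expanded object fields before the fix
        SearchResponse viaFilter = client.prepareSearch("wtf")
                .setQuery(QueryBuilders.filteredQuery(
                        QueryBuilders.matchAllQuery(),
                        FilterBuilders.existsFilter("complex")))
                .get();
        // Both are now expected to report the same hit count
        System.out.println(viaQueryString.getHits().getTotalHits() + " == "
                + viaFilter.getHits().getTotalHits());
    }
}
```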
{ "body": "In 1.0 Postings highlighter does not highlight trailing wildcard matches. I tried with both simple_query_string and query_string and things like photo\\* does not get highlighted\n", "comments": [ { "body": "Hi @roytmana , \ndo you mean that highlighting was working fine with the same query in 0.90? Can you post a recreation please?\n", "created_at": "2014-02-14T18:28:42Z" }, { "body": "I already migrated everything including index metadata to 1.0 so I can't\nconfirm with 100% certainty it was working in 0.90 but if you recall you\nand I were working on exactly the same issue a while ago and I believe it\nwas fixed.\n\nBefore I start working on a recreation (need to put it together from\nscratch) Do you think that it should NOT work by design?\n\nOn Fri, Feb 14, 2014 at 1:29 PM, Luca Cavanna notifications@github.comwrote:\n\n> Hi @roytmana https://github.com/roytmana ,\n> do you mean that highlighting was working fine with the same query in\n> 0.90? Can you post a recreation please?\n> \n> ## \n> \n> Reply to this email directly or view it on GitHubhttps://github.com/elasticsearch/elasticsearch/issues/5127#issuecomment-35110751\n> .\n", "created_at": "2014-02-14T18:33:50Z" }, { "body": "wait that has just been implemented in [lucene](https://issues.apache.org/jira/browse/LUCENE-5415) - I don't think we have support for MTQ in postings highlighter yet? This is coming with Lucene 4.7 \n", "created_at": "2014-02-14T18:33:52Z" }, { "body": "@roytmana I'm asking because I do remember I worked on this and we didn't touch anything in 1.0, thus I expect it to work on both 0.90 and 1.0. We also have tests for this which are green all the time.\n\n@s1monw we have our own custom postings highlighter, to which we added support for wildcards a while ago. Once lucene 4.7 is released I'll have a look at this again though ;)\n", "created_at": "2014-02-14T18:42:38Z" }, { "body": "@javanna let me create a recreation and test with it explicitly specifying highlighter in query. maybe something else has changed. I will post it shortly\n", "created_at": "2014-02-14T18:56:57Z" }, { "body": "It does not work. 
Here is a recreation (note I could not test actual curl as it does not take json on windows so I used different tools so excuse me if the curl syntax is broken ) \n\n```\ncurl -XDELETE http://localhost:9200/test\n\ncurl -XPOST http://localhost:9200/test -d '{\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n },\n \"mappings\": {\n \"ht\": {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\",\n \"index_options\": \"offsets\"\n }\n }\n }\n }\n}'\n\ncurl -XPOST http://localhost:9200/test/ht -d '{\"name\":\"photo equipment\"}'\ncurl -XPOST http://localhost:9200/test/ht -d '{\"name\":\"photography\"}'\n\ncurl -XPOST \"http://localhost:8680/ec-search/test/ht/_search\" -d'\n{\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"simple_query_string\": {\n \"fields\": [\n \"_all\"\n ],\n \"query\": \"photo\"\n }\n }\n ]\n }\n },\n \"highlight\": {\n \"fields\": {\n \"name\": {\n \"type\": \"postings\"\n }\n }\n }\n}'\n\ncurl -XPOST \"http://localhost:8680/ec-search/test/ht/_search\" -d'\n{\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"simple_query_string\": {\n \"fields\": [\n \"_all\"\n ],\n \"query\": \"photo*\"\n }\n }\n ]\n }\n },\n \"highlight\": {\n \"fields\": {\n \"name\": {\n \"type\": \"postings\"\n }\n }\n }\n}'\n```\n\nFirst (no wildcard) query returned highlight\n\n```\n{\n \"took\": 2,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 1,\n \"max_score\": 0.625,\n \"hits\": [\n {\n \"_index\": \"test\",\n \"_type\": \"ht\",\n \"_id\": \"XU_c0rhUSBiu2KfVPjP-sg\",\n \"_score\": 0.625,\n \"_source\": {\n \"name\": \"photo equipment\"\n },\n \"highlight\": {\n \"name\": [\n \"<em>photo</em> equipment\"\n ]\n }\n }\n ]\n }\n}\n```\n\nsecond did not: \n\n```\n{\n \"took\": 1,\n \"timed_out\": false,\n \"_shards\": {\n \"total\": 1,\n \"successful\": 1,\n \"failed\": 0\n },\n \"hits\": {\n \"total\": 2,\n \"max_score\": 1,\n \"hits\": [\n {\n \"_index\": \"test\",\n \"_type\": \"ht\",\n \"_id\": \"XU_c0rhUSBiu2KfVPjP-sg\",\n \"_score\": 1,\n \"_source\": {\n \"name\": \"photo equipment\"\n }\n },\n {\n \"_index\": \"test\",\n \"_type\": \"ht\",\n \"_id\": \"tJOe5F7kQJiSMjOvTNPwpg\",\n \"_score\": 1,\n \"_source\": {\n \"name\": \"photography\"\n }\n }\n ]\n }\n}\n```\n", "created_at": "2014-02-14T19:31:43Z" }, { "body": "Another observation that is not directly related to the wildcards issue.\n\nWhen query is done on the _all field while highlighting is done on specific fields contributing to all, it works well when the fields are of string type. When the fields are numeric or date there will be no highlighting. However highlighting is done if searching on those numeric/date fields individually\n\nI guess it can't be helped due to field type loss in _all? But if it did work it would have been really great.\n\nPlease let me know if I should create a ticket for it or not\n", "created_at": "2014-02-14T21:12:34Z" }, { "body": "Hi @roytmana thanks for the recreation, I'm looking into this.\nThe problem is the same against both 0.90 and 1.0, wildcards do work but only when they are in the top-level query :) and not within compound queries. The fact that you query a specific type makes it a filtered query, which triggers this issue.\n", "created_at": "2014-02-17T12:26:36Z" }, { "body": "This was solved in #5143.\n", "created_at": "2014-02-21T20:59:29Z" } ], "number": 5127, "title": "Postings Highlighter does not highlight trailing wildcard matches" }
{ "body": "In #4052 we added support for highlighting multi term queries using the postings highlighter. That worked only for top-level queries though, and not for multi term queries that are nested for instance within a bool query, or filtered query, or a constant score query.\n\nThe way we can make this work is by walking the query structure and temporarily overriding the query rewrite method with a method that allows for multi terms extraction.\n\nCloses #5127 \n", "number": 5143, "review_comments": [ { "body": "this needs to be `FilteredQuery` rather than `XFilteredQuery` same goes for the `XConstantScore`\n", "created_at": "2014-02-21T11:27:01Z" }, { "body": "good point I think we should have both then... `XFilteredQuery` and `XConstantScore` needs to be there otherwise this doesn't work for our filtered query and our constant score query?\n", "created_at": "2014-02-21T11:30:57Z" }, { "body": "true for `XFilteredQuery` used to subclass `FilteredQuery` but not for Constant:\n\n``` Java\npublic class XConstantScoreQuery extends ConstantScoreQuery {\n```\n", "created_at": "2014-02-21T11:34:07Z" }, { "body": "you need to be careful here you might have a filter that is wrapped but not a query than you get a NPE here\n", "created_at": "2014-02-21T11:36:08Z" }, { "body": "I wrote a test for this (with constant score query with filter but without query) and it's no problem as the method does something only `if query instanceof SomeQuery`.\n", "created_at": "2014-02-21T20:27:03Z" }, { "body": "Agreed\n", "created_at": "2014-02-21T20:27:10Z" } ], "title": "Fixed multi term queries support in postings highlighter for non top-level queries" }
{ "commits": [ { "message": "Fixed multi term queries support in postings highlighter for non top-level queries\n\nIn #4052 we added support for highlighting multi term queries using the postings highlighter. That worked only for top-level queries though, and not for multi term queries that are nested for instance within a bool query, or filtered query, or a constant score query.\n\nThe way we make this work is by walking the query structure and temporarily overriding the query rewrite method with a method that allows for multi terms extraction.\n\nCloses #5102" } ], "files": [ { "diff": "@@ -57,12 +57,20 @@ highlighting using the postings highlighter on it:\n }\n --------------------------------------------------\n \n+[NOTE]\n Note that the postings highlighter is meant to perform simple query terms\n highlighting, regardless of their positions. That means that when used for\n instance in combination with a phrase query, it will highlight all the terms\n that the query is composed of, regardless of whether they are actually part of\n a query match, effectively ignoring their positions.\n \n+[WARNING]\n+The postings highlighter does support highlighting of multi term queries, like\n+prefix queries, wildcard queries and so on. On the other hand, this requires\n+the queries to be rewritten using a proper\n+<<query-dsl-multi-term-rewrite,rewrite method>> that supports multi term\n+extraction, which is a potentially expensive operation.\n+\n \n ==== Fast vector highlighter\n ", "filename": "docs/reference/search/request/highlighting.asciidoc", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n */\n package org.elasticsearch.search.highlight;\n \n+import com.google.common.collect.Lists;\n import com.google.common.collect.Maps;\n import org.apache.lucene.index.FieldInfo;\n import org.apache.lucene.index.IndexReader;\n@@ -33,6 +34,8 @@\n import org.apache.lucene.util.UnicodeUtil;\n import org.elasticsearch.ElasticsearchIllegalArgumentException;\n import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.collect.Tuple;\n+import org.elasticsearch.common.lucene.search.XFilteredQuery;\n import org.elasticsearch.common.text.StringText;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.search.fetch.FetchPhaseExecutionException;\n@@ -144,25 +147,17 @@ public int compare(Snippet o1, Snippet o2) {\n }\n \n private static Query rewrite(HighlighterContext highlighterContext, IndexReader reader) throws IOException {\n- //rewrite is expensive: if the query was already rewritten we try not to rewrite\n- boolean mustRewrite = !highlighterContext.query.queryRewritten();\n \n Query original = highlighterContext.query.originalQuery();\n \n- MultiTermQuery originalMultiTermQuery = null;\n- MultiTermQuery.RewriteMethod originalRewriteMethod = null;\n- if (original instanceof MultiTermQuery) {\n- originalMultiTermQuery = (MultiTermQuery) original;\n- if (!allowsForTermExtraction(originalMultiTermQuery.getRewriteMethod())) {\n- originalRewriteMethod = originalMultiTermQuery.getRewriteMethod();\n- originalMultiTermQuery.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));\n- //we need to rewrite anyway if it is a multi term query which was rewritten with the wrong rewrite method\n- mustRewrite = true;\n- }\n- }\n+ //we walk the query tree and when we encounter multi term queries we need to make sure the rewrite method\n+ //supports multi term extraction. 
If not we temporarily override it (and restore it after the rewrite).\n+ List<Tuple<MultiTermQuery, MultiTermQuery.RewriteMethod>> modifiedMultiTermQueries = Lists.newArrayList();\n+ overrideMultiTermRewriteMethod(original, modifiedMultiTermQueries);\n \n- if (!mustRewrite) {\n- //return the rewritten query\n+ //rewrite is expensive: if the query was already rewritten we try not to rewrite it again\n+ if (highlighterContext.query.queryRewritten() && modifiedMultiTermQueries.size() == 0) {\n+ //return the already rewritten query\n return highlighterContext.query.query();\n }\n \n@@ -172,16 +167,46 @@ private static Query rewrite(HighlighterContext highlighterContext, IndexReader\n query = rewrittenQuery;\n }\n \n- if (originalMultiTermQuery != null) {\n- if (originalRewriteMethod != null) {\n- //set back the original rewrite method after the rewrite is done\n- originalMultiTermQuery.setRewriteMethod(originalRewriteMethod);\n- }\n+ //set back the original rewrite method after the rewrite is done\n+ for (Tuple<MultiTermQuery, MultiTermQuery.RewriteMethod> modifiedMultiTermQuery : modifiedMultiTermQueries) {\n+ modifiedMultiTermQuery.v1().setRewriteMethod(modifiedMultiTermQuery.v2());\n }\n \n return query;\n }\n \n+ private static void overrideMultiTermRewriteMethod(Query query, List<Tuple<MultiTermQuery, MultiTermQuery.RewriteMethod>> modifiedMultiTermQueries) {\n+\n+ if (query instanceof MultiTermQuery) {\n+ MultiTermQuery originalMultiTermQuery = (MultiTermQuery) query;\n+ if (!allowsForTermExtraction(originalMultiTermQuery.getRewriteMethod())) {\n+ MultiTermQuery.RewriteMethod originalRewriteMethod = originalMultiTermQuery.getRewriteMethod();\n+ originalMultiTermQuery.setRewriteMethod(new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(50));\n+ //we need to rewrite anyway if it is a multi term query which was rewritten with the wrong rewrite method\n+ modifiedMultiTermQueries.add(Tuple.tuple(originalMultiTermQuery, originalRewriteMethod));\n+ }\n+ }\n+\n+ if (query instanceof BooleanQuery) {\n+ BooleanQuery booleanQuery = (BooleanQuery) query;\n+ for (BooleanClause booleanClause : booleanQuery) {\n+ overrideMultiTermRewriteMethod(booleanClause.getQuery(), modifiedMultiTermQueries);\n+ }\n+ }\n+\n+ if (query instanceof XFilteredQuery) {\n+ overrideMultiTermRewriteMethod(((XFilteredQuery) query).getQuery(), modifiedMultiTermQueries);\n+ }\n+\n+ if (query instanceof FilteredQuery) {\n+ overrideMultiTermRewriteMethod(((FilteredQuery) query).getQuery(), modifiedMultiTermQueries);\n+ }\n+\n+ if (query instanceof ConstantScoreQuery) {\n+ overrideMultiTermRewriteMethod(((ConstantScoreQuery) query).getQuery(), modifiedMultiTermQueries);\n+ }\n+ }\n+\n private static boolean allowsForTermExtraction(MultiTermQuery.RewriteMethod rewriteMethod) {\n return rewriteMethod instanceof TopTermsRewrite || rewriteMethod instanceof ScoringRewrite;\n }", "filename": "src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java", "status": "modified" }, { "diff": "@@ -48,6 +48,7 @@\n import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH;\n import static org.elasticsearch.client.Requests.searchRequest;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.index.query.FilterBuilders.*;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;\n import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;\n@@ -1393,23 
+1394,23 @@ public void testPhrasePrefix() throws ElasticsearchException, IOException {\n .setSource(\"field4\", \"a quick fast blue car\").get();\n refresh();\n \n- source = searchSource().postFilter(FilterBuilders.typeFilter(\"type2\")).query(matchPhrasePrefixQuery(\"field3\", \"fast bro\")).from(0).size(60).explain(true)\n+ source = searchSource().postFilter(typeFilter(\"type2\")).query(matchPhrasePrefixQuery(\"field3\", \"fast bro\")).from(0).size(60).explain(true)\n .highlight(highlight().field(\"field3\").order(\"score\").preTags(\"<x>\").postTags(\"</x>\"));\n \n searchResponse = client().search(searchRequest(\"test\").source(source).searchType(QUERY_THEN_FETCH)).actionGet();\n \n assertHighlight(searchResponse, 0, \"field3\", 0, 1, equalTo(\"The <x>quick</x> <x>brown</x> fox jumps over the lazy dog\"));\n \n logger.info(\"--> highlighting and searching on field4\");\n- source = searchSource().postFilter(FilterBuilders.typeFilter(\"type2\")).query(matchPhrasePrefixQuery(\"field4\", \"the fast bro\")).from(0).size(60).explain(true)\n+ source = searchSource().postFilter(typeFilter(\"type2\")).query(matchPhrasePrefixQuery(\"field4\", \"the fast bro\")).from(0).size(60).explain(true)\n .highlight(highlight().field(\"field4\").order(\"score\").preTags(\"<x>\").postTags(\"</x>\"));\n searchResponse = client().search(searchRequest(\"test\").source(source).searchType(QUERY_THEN_FETCH)).actionGet();\n \n assertHighlight(searchResponse, 0, \"field4\", 0, 1, equalTo(\"<x>The quick browse</x> button is a fancy thing, right bro?\"));\n assertHighlight(searchResponse, 1, \"field4\", 0, 1, equalTo(\"<x>The quick brown</x> fox jumps over the lazy dog\"));\n \n logger.info(\"--> highlighting and searching on field4\");\n- source = searchSource().postFilter(FilterBuilders.typeFilter(\"type2\")).query(matchPhrasePrefixQuery(\"field4\", \"a fast quick blue ca\")).from(0).size(60).explain(true)\n+ source = searchSource().postFilter(typeFilter(\"type2\")).query(matchPhrasePrefixQuery(\"field4\", \"a fast quick blue ca\")).from(0).size(60).explain(true)\n .highlight(highlight().field(\"field4\").order(\"score\").preTags(\"<x>\").postTags(\"</x>\"));\n searchResponse = client().search(searchRequest(\"test\").source(source).searchType(QUERY_THEN_FETCH)).actionGet();\n \n@@ -2419,6 +2420,85 @@ public void testPostingsHighlighterQueryString() throws Exception {\n }\n }\n \n+ @Test\n+ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws Exception {\n+\n+ assertAcked(client().admin().indices().prepareCreate(\"test\").addMapping(\"type1\", type1PostingsffsetsMapping()));\n+ ensureGreen();\n+\n+ client().prepareIndex(\"test\", \"type1\").setSource(\"field1\", \"The photography word will get highlighted\").get();\n+ refresh();\n+\n+ logger.info(\"--> highlighting and searching on field1\");\n+ for (String rewriteMethod : REWRITE_METHODS) {\n+ SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery(\"field1\", \"pho[a-z]+\").rewrite(rewriteMethod)))\n+ .highlight(highlight().field(\"field1\"));\n+ SearchResponse searchResponse = client().search(searchRequest(\"test\").source(source)\n+ .searchType(randomBoolean() ? 
SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();\n+ assertHighlight(searchResponse, 0, \"field1\", 0, 1, equalTo(\"The <em>photography</em> word will get highlighted\"));\n+ }\n+ }\n+\n+ @Test\n+ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Exception {\n+\n+ assertAcked(client().admin().indices().prepareCreate(\"test\").addMapping(\"type1\", type1PostingsffsetsMapping()));\n+ ensureGreen();\n+\n+ client().prepareIndex(\"test\", \"type1\").setSource(\"field1\", \"The photography word will get highlighted\").get();\n+ refresh();\n+\n+ logger.info(\"--> highlighting and searching on field1\");\n+ for (String rewriteMethod : REWRITE_METHODS) {\n+ SearchSourceBuilder source = searchSource().query(boolQuery()\n+ .should(constantScoreQuery(FilterBuilders.missingFilter(\"field1\")))\n+ .should(matchQuery(\"field1\", \"test\"))\n+ .should(filteredQuery(queryString(\"field1:photo*\").rewrite(rewriteMethod), null)))\n+ .highlight(highlight().field(\"field1\"));\n+ SearchResponse searchResponse = client().search(searchRequest(\"test\").source(source)\n+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();\n+ assertHighlight(searchResponse, 0, \"field1\", 0, 1, equalTo(\"The <em>photography</em> word will get highlighted\"));\n+ }\n+ }\n+\n+ @Test\n+ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Exception {\n+\n+ assertAcked(client().admin().indices().prepareCreate(\"test\").addMapping(\"type1\", type1PostingsffsetsMapping()));\n+ ensureGreen();\n+\n+ client().prepareIndex(\"test\", \"type1\").setSource(\"field1\", \"The photography word will get highlighted\").get();\n+ refresh();\n+\n+ logger.info(\"--> highlighting and searching on field1\");\n+ for (String rewriteMethod : REWRITE_METHODS) {\n+ SearchSourceBuilder source = searchSource().query(boolQuery().must(prefixQuery(\"field1\", \"photo\").rewrite(rewriteMethod)).should(matchQuery(\"field1\", \"test\").minimumShouldMatch(\"0\")))\n+ .highlight(highlight().field(\"field1\"));\n+ SearchResponse searchResponse = client().search(searchRequest(\"test\").source(source)\n+ .searchType(randomBoolean() ? SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();\n+ assertHighlight(searchResponse, 0, \"field1\", 0, 1, equalTo(\"The <em>photography</em> word will get highlighted\"));\n+ }\n+ }\n+\n+ @Test\n+ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Exception {\n+\n+ assertAcked(client().admin().indices().prepareCreate(\"test\").addMapping(\"type1\", type1PostingsffsetsMapping()));\n+ ensureGreen();\n+\n+ client().prepareIndex(\"test\", \"type1\").setSource(\"field1\", \"The photography word will get highlighted\").get();\n+ refresh();\n+\n+ logger.info(\"--> highlighting and searching on field1\");\n+ for (String rewriteMethod : REWRITE_METHODS) {\n+ SearchSourceBuilder source = searchSource().query(filteredQuery(queryString(\"field1:photo*\").rewrite(rewriteMethod), missingFilter(\"field_null\")))\n+ .highlight(highlight().field(\"field1\"));\n+ SearchResponse searchResponse = client().search(searchRequest(\"test\").source(source)\n+ .searchType(randomBoolean() ? 
SearchType.DFS_QUERY_THEN_FETCH : SearchType.QUERY_THEN_FETCH)).get();\n+ assertHighlight(searchResponse, 0, \"field1\", 0, 1, equalTo(\"The <em>photography</em> word will get highlighted\"));\n+ }\n+ }\n+\n @Test\n @Slow\n public void testPostingsHighlighterManyDocs() throws Exception {", "filename": "src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java", "status": "modified" } ] }
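The change above forces a term-collecting rewrite method onto multi-term queries (regexp, prefix, query_string wildcards) even when they are nested inside bool, filtered, or constant_score queries, so the postings highlighter can extract terms from them. Below is a minimal sketch of the kind of request the new tests exercise, using only calls that appear in the test itself (the index name, field name, and the `client` parameter come from the test fixture and are illustrative, not a general recipe):

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import static org.elasticsearch.client.Requests.searchRequest;
import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.regexpQuery;
import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;
import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;

public final class NestedMultiTermHighlightSketch {

    // Highlight "field1" for a regexp query wrapped in a constant_score query,
    // mirroring testPostingsHighlighterRegexpQueryWithinConstantScoreQuery above.
    static SearchResponse highlightNestedRegexp(Client client) {
        SearchSourceBuilder source = searchSource()
                .query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+")))
                .highlight(highlight().field("field1"));
        return client.search(searchRequest("test").source(source)).actionGet();
    }
}
```

Before the change, a nested multi-term query like this kept a rewrite method that exposes no terms to the highlighter; the recursive overrideMultiTermRewriteMethod walk is what makes the highlighted fragment come back.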
{ "body": "When elasticsearch is running with assertions enabled, a constant score query that doesn't match a record can cause AssertionError to be thrown. To reproduce start elasticsearch with assertions enabled and execute the following script\n\non 0.90 branch:\n\n```\ncurl -XDELETE localhost:9200/test-idx\ncurl -XPUT 'localhost:9200/test-idx/'\ncurl -XPUT 'localhost:9200/_percolator/test-idx/1' -d '{\n \"query\" : {\n \"constant_score\":{\n \"filter\": {\n \"and\": [{\n \"query\": {\n \"query_string\" : {\n \"query\" : \"root\"\n }\n }\n }, {\n \"term\" : {\n \"message\" : \"tree\"\n }\n }]\n }\n }\n }\n}'\ncurl -XGET 'localhost:9200/test-idx/message/_percolate' -d '{\n \"doc\" : {\n \"message\" : \"A new bonsai tree in the office\"\n }\n}'\n```\n\nto reproduce on master:\n\n```\ncurl -XDELETE localhost:9200/test-idx\ncurl -XPUT 'localhost:9200/test-idx/.percolator/1' -d '{\n \"query\" : {\n \"constant_score\":{\n \"filter\": {\n \"and\": [{\n \"query\": {\n \"query_string\" : {\n \"query\" : \"root\"\n }\n }\n }, {\n \"term\" : {\n \"message\" : \"tree\"\n }\n }]\n }\n }\n }\n}'\ncurl -XGET 'localhost:9200/test-idx/message/_percolate' -d '{\n \"doc\" : {\n \"message\" : \"A new bonsai tree in the office\"\n }\n}'\n```\n\nOn the master the issue is not as prominent since the error is not returned to the user. On the current master the result of execution of the script above is the following error in the log:\n\n```\n[2014-02-07 10:49:32,594][WARN ][percolator ] [Blizzard II] [[31]] failed to execute query\njava.lang.AssertionError\n at org.apache.lucene.search.Scorer.score(Scorer.java:61)\n at org.apache.lucene.search.ConstantScoreQuery$ConstantScorer.score(ConstantScoreQuery.java:256)\n at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:621)\n at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:309)\n at org.elasticsearch.percolator.PercolatorService$4.doPercolate(PercolatorService.java:543)\n at org.elasticsearch.percolator.PercolatorService.percolate(PercolatorService.java:232)\n at org.elasticsearch.action.percolate.TransportPercolateAction.shardOperation(TransportPercolateAction.java:194)\n at org.elasticsearch.action.percolate.TransportPercolateAction.shardOperation(TransportPercolateAction.java:55)\n at org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction$AsyncBroadcastAction$2.run(TransportBroadcastOperationAction.java:226)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:724)\n```\n", "comments": [], "number": 5049, "title": "AssertionError during percolation using constant score query" }
{ "body": "AndDocIdSet#IteratorBasedIterator was potentially initialized with\nNO_MORE_DOCS which violates the initial state of DocIdSetIterator and\ncould lead to undefined behavior when used in a search context.\n\nCloses #5049\n", "number": 5070, "review_comments": [ { "body": "maybe the method should be named `newDocIdSetIterator`?\n", "created_at": "2014-02-10T16:21:17Z" }, { "body": "should the `sets.length == 1` case be optimized as well?\n", "created_at": "2014-02-10T16:22:00Z" }, { "body": "Should we have an `assert false` here? Behavior of `nextDoc` and `advance` are undefined when the iterator is exhausted so we shouldn't have anything that relies on the fact that these methods return `NO_MORE_DOCS` when the iterator is exhausted?\n", "created_at": "2014-02-10T16:32:56Z" }, { "body": "good point\n", "created_at": "2014-02-10T16:35:30Z" }, { "body": "yeah agreeed I will add\n", "created_at": "2014-02-10T16:35:43Z" }, { "body": "++ \n", "created_at": "2014-02-10T16:35:52Z" } ], "title": "Fix AndDocIdSet#IteratorBasedIterator to not violate initial doc state" }
{ "commits": [ { "message": "Fix AndDocIdSet#IteratorBasedIterator to not violate initial doc state\n\nAndDocIdSet#IteratorBasedIterator was potentially initialized with\nNO_MORE_DOCS which violates the initial state of DocIdSetIterator and\ncould lead to undefined behavior when used in a search context.\n\nCloses #5049" } ], "files": [ { "diff": "@@ -79,15 +79,15 @@ public DocIdSetIterator iterator() throws IOException {\n }\n }\n if (bits.isEmpty()) {\n- return new IteratorBasedIterator(iterators.toArray(new DocIdSet[iterators.size()]));\n+ return IteratorBasedIterator.newDocIdSetIterator(iterators.toArray(new DocIdSet[iterators.size()]));\n }\n if (iterators.isEmpty()) {\n return new BitsDocIdSetIterator(new AndBits(bits.toArray(new Bits[bits.size()])));\n }\n // combination of both..., first iterating over the \"fast\" ones, and then checking on the more\n // expensive ones\n return new BitsDocIdSetIterator.FilteredIterator(\n- new IteratorBasedIterator(iterators.toArray(new DocIdSet[iterators.size()])),\n+ IteratorBasedIterator.newDocIdSetIterator(iterators.toArray(new DocIdSet[iterators.size()])),\n new AndBits(bits.toArray(new Bits[bits.size()]))\n );\n }\n@@ -117,33 +117,40 @@ public int length() {\n }\n \n static class IteratorBasedIterator extends DocIdSetIterator {\n- int lastReturn = -1;\n- private DocIdSetIterator[] iterators = null;\n+ private int lastReturn = -1;\n+ private final DocIdSetIterator[] iterators;\n private final long cost;\n \n- IteratorBasedIterator(DocIdSet[] sets) throws IOException {\n- iterators = new DocIdSetIterator[sets.length];\n+\n+ public static DocIdSetIterator newDocIdSetIterator(DocIdSet[] sets) throws IOException {\n+ if (sets.length == 0) {\n+ return DocIdSetIterator.empty();\n+ }\n+ final DocIdSetIterator[] iterators = new DocIdSetIterator[sets.length];\n int j = 0;\n long cost = Integer.MAX_VALUE;\n for (DocIdSet set : sets) {\n if (set == null) {\n- lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching\n- break;\n+ return DocIdSetIterator.empty();\n } else {\n- \n- DocIdSetIterator dcit = set.iterator();\n- if (dcit == null) {\n- lastReturn = DocIdSetIterator.NO_MORE_DOCS; // non matching\n- break;\n+ DocIdSetIterator docIdSetIterator = set.iterator();\n+ if (docIdSetIterator == null) {\n+ return DocIdSetIterator.empty();// non matching\n }\n- iterators[j++] = dcit;\n- cost = Math.min(cost, dcit.cost());\n+ iterators[j++] = docIdSetIterator;\n+ cost = Math.min(cost, docIdSetIterator.cost());\n }\n }\n- this.cost = cost;\n- if (lastReturn != DocIdSetIterator.NO_MORE_DOCS) {\n- lastReturn = (iterators.length > 0 ? 
-1 : DocIdSetIterator.NO_MORE_DOCS);\n+ if (sets.length == 1) {\n+ // shortcut if there is only one valid iterator.\n+ return iterators[0];\n }\n+ return new IteratorBasedIterator(iterators, cost);\n+ }\n+\n+ private IteratorBasedIterator(DocIdSetIterator[] iterators, long cost) throws IOException {\n+ this.iterators = iterators;\n+ this.cost = cost;\n }\n \n @Override\n@@ -154,7 +161,10 @@ public final int docID() {\n @Override\n public final int nextDoc() throws IOException {\n \n- if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;\n+ if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) {\n+ assert false : \"Illegal State - DocIdSetIterator is already exhausted\";\n+ return DocIdSetIterator.NO_MORE_DOCS;\n+ }\n \n DocIdSetIterator dcit = iterators[0];\n int target = dcit.nextDoc();\n@@ -183,7 +193,10 @@ public final int nextDoc() throws IOException {\n @Override\n public final int advance(int target) throws IOException {\n \n- if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;\n+ if (lastReturn == DocIdSetIterator.NO_MORE_DOCS) {\n+ assert false : \"Illegal State - DocIdSetIterator is already exhausted\";\n+ return DocIdSetIterator.NO_MORE_DOCS;\n+ }\n \n DocIdSetIterator dcit = iterators[0];\n target = dcit.advance(target);", "filename": "src/main/java/org/elasticsearch/common/lucene/docset/AndDocIdSet.java", "status": "modified" }, { "diff": "@@ -1548,4 +1548,29 @@ public static String[] convertFromTextArray(PercolateResponse.Match[] matches, S\n return strings;\n }\n \n+ @Test\n+ public void percolateNonMatchingConstantScoreQuery() throws Exception {\n+ assertAcked(client().admin().indices().prepareCreate(\"test\"));\n+ ensureGreen();\n+\n+ logger.info(\"--> register a query\");\n+ client().prepareIndex(\"test\", PercolatorService.TYPE_NAME, \"1\")\n+ .setSource(jsonBuilder().startObject()\n+ .field(\"query\", QueryBuilders.constantScoreQuery(FilterBuilders.andFilter(\n+ FilterBuilders.queryFilter(QueryBuilders.queryString(\"root\")),\n+ FilterBuilders.termFilter(\"message\", \"tree\"))))\n+ .endObject())\n+ .setRefresh(true)\n+ .execute().actionGet();\n+\n+ PercolateResponse percolate = client().preparePercolate()\n+ .setIndices(\"test\").setDocumentType(\"doc\")\n+ .setSource(jsonBuilder().startObject()\n+ .startObject(\"doc\").field(\"message\", \"A new bonsai tree \").endObject()\n+ .endObject())\n+ .execute().actionGet();\n+ assertThat(percolate.getFailedShards(), equalTo(0));\n+ assertMatchCount(percolate, 0l);\n+ }\n+\n }", "filename": "src/test/java/org/elasticsearch/percolator/PercolatorTests.java", "status": "modified" } ] }
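For context, the contract the old constructor violated is easy to see with Lucene's own "matches nothing" iterator: a DocIdSetIterator must report docID() == -1 until the first nextDoc()/advance() call, and callers such as Scorer.score() assert exactly that, which is where the AssertionError in #5049 surfaced. A small hedged demo, assuming a Lucene 4.x classpath as used by these Elasticsearch versions:

```java
import org.apache.lucene.search.DocIdSetIterator;

public final class EmptyIteratorContractDemo {
    public static void main(String[] args) throws Exception {
        // DocIdSetIterator.empty() is the correct way to express "no matching docs":
        // it starts unpositioned and only reports NO_MORE_DOCS after being advanced.
        DocIdSetIterator empty = DocIdSetIterator.empty();
        System.out.println(empty.docID());   // -1: not positioned yet
        System.out.println(empty.nextDoc()); // NO_MORE_DOCS: exhausted after one step
        System.out.println(empty.docID());   // NO_MORE_DOCS from now on
    }
}
```

The patch therefore returns this empty iterator from the new factory method whenever a clause can never match, instead of constructing an IteratorBasedIterator whose lastReturn is already NO_MORE_DOCS.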
{ "body": "According to http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-datehistogram-aggregation.html\n\nthe time_zone attribute can take integer value but in DateHistogramParser.java this attribute is only parsed when the value is a string.\n\nSo for example: \n time_zone: 1 \ngives: \n\nParse Failure [Unknown key for a VALUE_NUMBER in [agg_name]: [time_zone].]]\n\nVersion: ES 1.0RC2 and 1.0 branch.\n", "comments": [ { "body": "indeed... will be fixed, thx!\n", "created_at": "2014-02-08T21:21:29Z" } ], "number": 5057, "title": "date_histogram aggregation and time_zone" }
{ "body": "Closes #5057\n", "number": 5060, "review_comments": [], "title": "Fixed parsing time zones as numeric value in DateHistogramParser" }
{ "commits": [ { "message": "Fixed parsing time zones as numeric value in DateHistogramParser\n\nCloses #5057" } ], "files": [ { "diff": "@@ -110,13 +110,13 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n } else if (\"lang\".equals(currentFieldName)) {\n scriptLang = parser.text();\n } else if (\"time_zone\".equals(currentFieldName) || \"timeZone\".equals(currentFieldName)) {\n- preZone = parseZone(parser, token);\n+ preZone = parseZone(parser.text());\n } else if (\"pre_zone\".equals(currentFieldName) || \"preZone\".equals(currentFieldName)) {\n- preZone = parseZone(parser, token);\n+ preZone = parseZone(parser.text());\n } else if (\"pre_zone_adjust_large_interval\".equals(currentFieldName) || \"preZoneAdjustLargeInterval\".equals(currentFieldName)) {\n preZoneAdjustLargeInterval = parser.booleanValue();\n } else if (\"post_zone\".equals(currentFieldName) || \"postZone\".equals(currentFieldName)) {\n- postZone = parseZone(parser, token);\n+ postZone = parseZone(parser.text());\n } else if (\"pre_offset\".equals(currentFieldName) || \"preOffset\".equals(currentFieldName)) {\n preOffset = parseOffset(parser.text());\n } else if (\"post_offset\".equals(currentFieldName) || \"postOffset\".equals(currentFieldName)) {\n@@ -139,6 +139,12 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n } else if (token == XContentParser.Token.VALUE_NUMBER) {\n if (\"min_doc_count\".equals(currentFieldName) || \"minDocCount\".equals(currentFieldName)) {\n minDocCount = parser.longValue();\n+ } else if (\"time_zone\".equals(currentFieldName) || \"timeZone\".equals(currentFieldName)) {\n+ preZone = DateTimeZone.forOffsetHours(parser.intValue());\n+ } else if (\"pre_zone\".equals(currentFieldName) || \"preZone\".equals(currentFieldName)) {\n+ preZone = DateTimeZone.forOffsetHours(parser.intValue());\n+ } else if (\"post_zone\".equals(currentFieldName) || \"postZone\".equals(currentFieldName)) {\n+ postZone = DateTimeZone.forOffsetHours(parser.intValue());\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");\n }\n@@ -247,23 +253,18 @@ private long parseOffset(String offset) throws IOException {\n return TimeValue.parseTimeValue(offset.substring(beginIndex), null).millis();\n }\n \n- private DateTimeZone parseZone(XContentParser parser, XContentParser.Token token) throws IOException {\n- if (token == XContentParser.Token.VALUE_NUMBER) {\n- return DateTimeZone.forOffsetHours(parser.intValue());\n+ private DateTimeZone parseZone(String text) throws IOException {\n+ int index = text.indexOf(':');\n+ if (index != -1) {\n+ int beginIndex = text.charAt(0) == '+' ? 1 : 0;\n+ // format like -02:30\n+ return DateTimeZone.forOffsetHoursMinutes(\n+ Integer.parseInt(text.substring(beginIndex, index)),\n+ Integer.parseInt(text.substring(index + 1))\n+ );\n } else {\n- String text = parser.text();\n- int index = text.indexOf(':');\n- if (index != -1) {\n- int beginIndex = text.charAt(0) == '+' ? 
1 : 0;\n- // format like -02:30\n- return DateTimeZone.forOffsetHoursMinutes(\n- Integer.parseInt(text.substring(beginIndex, index)),\n- Integer.parseInt(text.substring(index + 1))\n- );\n- } else {\n- // id, listed here: http://joda-time.sourceforge.net/timezones.html\n- return DateTimeZone.forID(text);\n- }\n+ // id, listed here: http://joda-time.sourceforge.net/timezones.html\n+ return DateTimeZone.forID(text);\n }\n }\n ", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java", "status": "modified" }, { "diff": "@@ -22,6 +22,8 @@\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;\n import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;\n import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;\n import org.elasticsearch.search.aggregations.metrics.max.Max;\n@@ -33,6 +35,7 @@\n import org.junit.Before;\n import org.junit.Test;\n \n+import java.io.IOException;\n import java.util.ArrayList;\n import java.util.List;\n \n@@ -118,6 +121,78 @@ public void singleValuedField() throws Exception {\n assertThat(bucket.getDocCount(), equalTo(3l));\n }\n \n+ @Test\n+ public void singleValuedField_WithPostTimeZone() throws Exception {\n+ SearchResponse response;\n+ if (randomBoolean()) {\n+ response = client().prepareSearch(\"idx\")\n+ .addAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).postZone(\"-01:00\"))\n+ .execute().actionGet();\n+ } else {\n+\n+ // checking post_zone setting as an int\n+\n+ response = client().prepareSearch(\"idx\")\n+ .addAggregation(new AbstractAggregationBuilder(\"histo\", \"date_histogram\") {\n+ @Override\n+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n+ return builder.startObject(name)\n+ .startObject(type)\n+ .field(\"field\", \"date\")\n+ .field(\"interval\", \"1d\")\n+ .field(\"post_zone\", -1)\n+ .endObject()\n+ .endObject();\n+ }\n+ })\n+ .execute().actionGet();\n+ }\n+\n+ assertSearchResponse(response);\n+\n+\n+ DateHistogram histo = response.getAggregations().get(\"histo\");\n+ assertThat(histo, notNullValue());\n+ assertThat(histo.getName(), equalTo(\"histo\"));\n+ assertThat(histo.getBuckets().size(), equalTo(6));\n+\n+ long key = new DateTime(2012, 1, 2, 0, 0, DateTimeZone.forID(\"+01:00\")).getMillis();\n+ DateHistogram.Bucket bucket = histo.getBucketByKey(key);\n+ assertThat(bucket, notNullValue());\n+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));\n+ assertThat(bucket.getDocCount(), equalTo(1l));\n+\n+ key = new DateTime(2012, 2, 2, 0, 0, DateTimeZone.forID(\"+01:00\")).getMillis();\n+ bucket = histo.getBucketByKey(key);\n+ assertThat(bucket, notNullValue());\n+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));\n+ assertThat(bucket.getDocCount(), equalTo(1l));\n+\n+ key = new DateTime(2012, 2, 15, 0, 0, DateTimeZone.forID(\"+01:00\")).getMillis();\n+ bucket = histo.getBucketByKey(key);\n+ assertThat(bucket, notNullValue());\n+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));\n+ assertThat(bucket.getDocCount(), equalTo(1l));\n+\n+ key = new DateTime(2012, 3, 2, 0, 0, DateTimeZone.forID(\"+01:00\")).getMillis();\n+ bucket = histo.getBucketByKey(key);\n+ assertThat(bucket, 
notNullValue());\n+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));\n+ assertThat(bucket.getDocCount(), equalTo(1l));\n+\n+ key = new DateTime(2012, 3, 15, 0, 0, DateTimeZone.forID(\"+01:00\")).getMillis();\n+ bucket = histo.getBucketByKey(key);\n+ assertThat(bucket, notNullValue());\n+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));\n+ assertThat(bucket.getDocCount(), equalTo(1l));\n+\n+ key = new DateTime(2012, 3, 23, 0, 0, DateTimeZone.forID(\"+01:00\")).getMillis();\n+ bucket = histo.getBucketByKey(key);\n+ assertThat(bucket, notNullValue());\n+ assertThat(bucket.getKeyAsNumber().longValue(), equalTo(key));\n+ assertThat(bucket.getDocCount(), equalTo(1l));\n+ }\n+\n @Test\n public void singleValuedField_OrderedByKeyAsc() throws Exception {\n SearchResponse response = client().prepareSearch(\"idx\")", "filename": "src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java", "status": "modified" } ] }
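The Joda-Time calls behind both accepted forms are shown below as a standalone sketch (class and method names are illustrative; the real logic lives in DateHistogramParser): a bare number such as `time_zone: 1` becomes a whole-hour offset, while strings are either `+HH:mm` style offsets or Joda zone IDs.

```java
import org.joda.time.DateTimeZone;

public final class TimeZoneParsingSketch {

    // Numeric form, e.g. "time_zone": 1  ->  UTC+01:00
    static DateTimeZone fromNumber(int offsetHours) {
        return DateTimeZone.forOffsetHours(offsetHours);
    }

    // String form, e.g. "-02:30" or a zone id such as "Europe/Amsterdam"
    static DateTimeZone fromText(String text) {
        int index = text.indexOf(':');
        if (index != -1) {
            int beginIndex = text.charAt(0) == '+' ? 1 : 0; // format like -02:30
            return DateTimeZone.forOffsetHoursMinutes(
                    Integer.parseInt(text.substring(beginIndex, index)),
                    Integer.parseInt(text.substring(index + 1)));
        }
        // id, listed at http://joda-time.sourceforge.net/timezones.html
        return DateTimeZone.forID(text);
    }
}
```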
{ "body": "Hi,\n\nA couple of days ago I started a thread on the mailing list (https://groups.google.com/forum/?fromgroups=#!topic/elasticsearch/c_xLCPOpvjc) about this issue, and the responses on it are slim.\n\nThe problem exists in the aggregations api since version 1.0.0.RC1 and is confirmed by me to also occur in 1.0.0.RC2.\n\nThe problem is that when you do a terms aggregation on an index sharded in multiple shards (10 in my case) it start to return inconsistent numbers. With this I mean that the numbers are different the second time compared to the first time. You cannot show these numbers to users as when they reload the analytics it shows totally different numbers than before without anything changing to the data.\n\nI created a test suit as a gist for you to recreate the problem your self. It is hosted at: https://gist.github.com/thanodnl/8803745.\n\nBut since it contains datafiles it is kind of bugged in the web interface of github. Best you can clone this gist by running: `$ git clone https://gist.github.com/8803745.git`\n\ncd into the newly created directory and run: `$ ./aggsbug.load.sh` to load the test set into your local database. This can take a couple of minutes since it is loading ~1M documents. I tried to recreate it with a smaller set, but then the issue is not appearing.\n\nOnce the data is loaded you can run a contained test with: `$ ./aggsbug.test.sh`. This will call the same aggregation twice, store the output, and later print the diff of the output.\n\nIf you recreated the bug the output of the test should be something like:\n\n```\n$ ./aggsbug.test.sh\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 1088 100 950 100 138 192 27 0:00:05 0:00:04 0:00:01 206\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 1086 100 948 100 138 2867 417 --:--:-- --:--:-- --:--:-- 2872\ndiff in 2 aggs calls:\n2c2\n< \"took\" : 4918,\n\n---\n> \"took\" : 325,\n18c18\n< \"doc_count\" : 3599\n\n---\n> \"doc_count\" : 3228\n21c21\n< \"doc_count\" : 2517\n\n---\n> \"doc_count\" : 2254\n24c24\n< \"doc_count\" : 2207\n\n---\n> \"doc_count\" : 2007\n27c27\n< \"doc_count\" : 2207\n\n---\n> \"doc_count\" : 1971\n30c30\n< \"doc_count\" : 1660\n\n---\n> \"doc_count\" : 1478\n33c33\n< \"doc_count\" : 1534\n\n---\n> \"doc_count\" : 1401\n36c36\n< \"doc_count\" : 1468\n\n---\n> \"doc_count\" : 1330\n39c39\n< \"doc_count\" : 1079\n\n---\n> \"doc_count\" : 952\n```\n\nWhen ran against 1.0.0.Beta2 the output is what is to be expected:\n\n```\n$ ./aggsbug.test.sh\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 1087 100 949 100 138 208 30 0:00:04 0:00:04 --:--:-- 208\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 1086 100 948 100 138 1525 222 --:--:-- --:--:-- --:--:-- 1526\ndiff in 2 aggs calls:\n2c2\n< \"took\" : 4525,\n\n---\n> \"took\" : 611,\n```\n\nYou see the output of the aggs is not occurring in the diff during the test, and the only diff between the two runs is the time it took to calculate the result.\n", "comments": [ { "body": "Thanks for reporting this issue, this looks like a bad bug indeed. 
I'll look into it.\n", "created_at": "2014-02-05T14:54:59Z" }, { "body": "More info from what I found.\n\nES 1.0.0.RC2\nMac OS X 10.8.5\nDarwin Jorg-Prantes-MacBook-Pro.local 12.5.0 Darwin Kernel Version 12.5.0: Sun Sep 29 13:33:47 PDT 2013; root:xnu-2050.48.12~1/RELEASE_X86_64 x86_64\njava version \"1.8.0\"\nJava(TM) SE Runtime Environment (build 1.8.0-b128)\nJava HotSpot(TM) 64-Bit Server VM (build 25.0-b69, mixed mode)\nG1GC enabled\n\nES 1.0.0.RC2\nRHEL 6.3\nLinux zephyros 2.6.32-279.el6.x86_64 #1 SMP Wed Jun 13 18:24:36 EDT 2012 x86_64 x86_64 x86_64 GNU/Linux\njava version \"1.8.0\"\nJava(TM) SE Runtime Environment (build 1.8.0-b128)\nJava HotSpot(TM) 64-Bit Server VM (build 25.0-b69, mixed mode)\nG1GC enabled\n\nOn Mac, counts may change between first and subsequent runs. On the first run, the counts are lower than on the subsequent runs.\n\nOn Linux, the effect is more subtle. Counts do not change between runs. But, it seems different shard count lead to deviating entries, on the lower buckets.\n\nHere are two Linux examples, using Nils' data set. First is 10 shards, second is 5 shards, the lower three buckets differ.\n\nshards=10\n\n```\n{\n \"took\" : 143,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 10,\n \"successful\" : 10,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1060387,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"a\" : {\n \"buckets\" : [ {\n \"key\" : \"totaltrafficbos\",\n \"doc_count\" : 3599\n }, {\n \"key\" : \"mai93thm\",\n \"doc_count\" : 2517\n }, {\n \"key\" : \"mai90thm\",\n \"doc_count\" : 2207\n }, {\n \"key\" : \"mai95thm\",\n \"doc_count\" : 2207\n }, {\n \"key\" : \"totaltrafficnyc\",\n \"doc_count\" : 1660\n }, {\n \"key\" : \"confessions\",\n \"doc_count\" : 1534\n }, {\n \"key\" : \"incidentreports\",\n \"doc_count\" : 1468\n }, {\n \"key\" : \"nji80thm\",\n \"doc_count\" : 1071\n }, {\n \"key\" : \"pai76thm\",\n \"doc_count\" : 1039\n }, {\n \"key\" : \"txi35thm\",\n \"doc_count\" : 357\n } ]\n }\n }\n}\n```\n\nshards=5\n\n```\n{\n \"took\" : 302,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1060387,\n \"max_score\" : 0.0,\n \"hits\" : [ ]\n },\n \"aggregations\" : {\n \"a\" : {\n \"buckets\" : [ {\n \"key\" : \"totaltrafficbos\",\n \"doc_count\" : 3599\n }, {\n \"key\" : \"mai93thm\",\n \"doc_count\" : 2517\n }, {\n \"key\" : \"mai90thm\",\n \"doc_count\" : 2207\n }, {\n \"key\" : \"mai95thm\",\n \"doc_count\" : 2207\n }, {\n \"key\" : \"totaltrafficnyc\",\n \"doc_count\" : 1660\n }, {\n \"key\" : \"confessions\",\n \"doc_count\" : 1534\n }, {\n \"key\" : \"incidentreports\",\n \"doc_count\" : 1468\n }, {\n \"key\" : \"nji80thm\",\n \"doc_count\" : 1180\n }, {\n \"key\" : \"pai76thm\",\n \"doc_count\" : 936\n }, {\n \"key\" : \"nji78thm\",\n \"doc_count\" : 422\n } ]\n }\n }\n}\n```\n", "created_at": "2014-02-05T19:30:19Z" }, { "body": "I just learned it is already known that the bucket counts differ over shard numbers, also for facets https://github.com/elasticsearch/elasticsearch/issues/1305\n", "created_at": "2014-02-05T19:54:23Z" } ], "number": 5021, "title": "Aggregations return different counts when invoked twice in a row" }
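The comparison in the gist's curl scripts can also be made from the Java API: run the same terms aggregation twice against an unchanged index and compare per-bucket doc counts. This is a hedged sketch against the 1.0-era API (the index and field names are placeholders, not taken from the gist); with the bug present, the second run could report different counts even though nothing was indexed in between.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;

public final class RepeatedTermsAggSketch {

    // Run one terms aggregation; "test-idx" and "user" are placeholder names.
    static Terms runTermsAgg(Client client) {
        SearchResponse response = client.prepareSearch("test-idx")
                .setSearchType(SearchType.COUNT)
                .addAggregation(AggregationBuilders.terms("a").field("user").size(10))
                .execute().actionGet();
        return response.getAggregations().get("a");
    }

    // Print first-run vs second-run doc counts for the same buckets.
    static void compareTwoRuns(Client client) {
        Terms first = runTermsAgg(client);
        Terms second = runTermsAgg(client);
        for (Terms.Bucket bucket : first.getBuckets()) {
            long again = second.getBucketByKey(bucket.getKey()).getDocCount();
            System.out.println(bucket.getKey() + ": " + bucket.getDocCount() + " vs " + again);
        }
    }
}
```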
{ "body": "The byte[] array that was used to store the term was owned by the BytesRefHash\nwhich is used to compute counts. However, the BytesRefHash is released at some\npoint and its content may be recycled.\n\nMockPageCacheRecycler has been improved to expose this issue (putting random\ncontent into the arrays upon release).\n\nNumber of documents/terms have been increased in RandomTests to make sure page\nrecycling occurs.\n\nClose #5021\n", "number": 5039, "review_comments": [ { "body": "this is the fix\n", "created_at": "2014-02-06T14:44:02Z" }, { "body": "I like this! :)\n", "created_at": "2014-02-06T14:47:32Z" }, { "body": "is there really a need for a deep copy? The bucket's BytesRef is an already dedicated instance per bucket.... you can also do `copyBytes` here no?\n", "created_at": "2014-02-07T00:14:24Z" }, { "body": "scratch that... after looking closer, copyBytes cannot be used, which means the other alternative would be to copy the array and assign it explicitly and indeed `deepCopy` is cleaner (creates a new instance, but we only do that for those buckets we return anyway)\n", "created_at": "2014-02-07T01:02:58Z" } ], "title": "Fix BytesRef owning issue in string terms aggregations." }
{ "commits": [ { "message": "Fix BytesRef owning issue in string terms aggregations.\n\nThe byte[] array that was used to store the term was owned by the BytesRefHash\nwhich is used to compute counts. However, the BytesRefHash is released at some\npoint and its content may be recycled.\n\nMockPageCacheRecycler has been improved to expose this issue (putting random\ncontent into the arrays upon release).\n\nNumber of documents/terms have been increased in RandomTests to make sure page\nrecycling occurs.\n\nClose #5021" } ], "files": [ { "diff": "@@ -60,6 +60,7 @@ private static int rehash(int hash) {\n \n /**\n * Return the key at <code>0 &lte; index &lte; capacity()</code>. The result is undefined if the slot is unused.\n+ * <p color=\"red\">Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #release()} is called</p>\n */\n public BytesRef get(long id, BytesRef dest) {\n final long startOffset = startOffsets.get(id);", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/BytesRefHash.java", "status": "modified" }, { "diff": "@@ -57,7 +57,7 @@ public static void registerStreams() {\n \n public static class Bucket extends InternalTerms.Bucket {\n \n- final BytesRef termBytes;\n+ BytesRef termBytes;\n \n public Bucket(BytesRef term, long docCount, InternalAggregations aggregations) {\n super(docCount, aggregations);", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java", "status": "modified" }, { "diff": "@@ -234,6 +234,8 @@ public boolean apply(BytesRef input) {\n final InternalTerms.Bucket[] list = new InternalTerms.Bucket[ordered.size()];\n for (int i = ordered.size() - 1; i >= 0; --i) {\n final StringTerms.Bucket bucket = (StringTerms.Bucket) ordered.pop();\n+ // the terms are owned by the BytesRefHash, we need to pull a copy since the BytesRef hash data may be recycled at some point\n+ bucket.termBytes = BytesRef.deepCopyOf(bucket.termBytes);\n bucket.aggregations = bucketAggregations(bucket.bucketOrd);\n list[i] = bucket;\n }", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.elasticsearch.test.TestCluster;\n import org.elasticsearch.threadpool.ThreadPool;\n \n+import java.lang.reflect.Array;\n import java.util.Random;\n import java.util.concurrent.ConcurrentMap;\n \n@@ -51,7 +52,7 @@ public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {\n random = new Random(seed);\n }\n \n- private static <T> V<T> wrap(final V<T> v) {\n+ private <T> V<T> wrap(final V<T> v) {\n ACQUIRED_PAGES.put(v, new Throwable());\n final Thread t = Thread.currentThread();\n return new V<T>() {\n@@ -67,6 +68,14 @@ public boolean release() throws ElasticsearchException {\n if (t == null) {\n throw new IllegalStateException(\"Releasing a page that has not been acquired\");\n }\n+ final T ref = v();\n+ for (int i = 0; i < Array.getLength(ref); ++i) {\n+ if (ref instanceof Object[]) {\n+ Array.set(ref, i, null);\n+ } else {\n+ Array.set(ref, i, (byte) random.nextInt(256));\n+ }\n+ }\n return v.release();\n }\n ", "filename": "src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java", "status": "modified" }, { "diff": "@@ -148,8 +148,9 @@ public void testRandomRanges() throws Exception {\n \n // test long/double/string terms aggs with high number of buckets that require array growth\n public void testDuelTerms() throws Exception {\n- final int numDocs = 
atLeast(1000);\n- final int maxNumTerms = randomIntBetween(10, 10000);\n+ // These high numbers of docs and terms are important to trigger page recycling\n+ final int numDocs = atLeast(10000);\n+ final int maxNumTerms = randomIntBetween(10, 100000);\n \n final IntOpenHashSet valuesSet = new IntOpenHashSet();\n wipeIndices(\"idx\");", "filename": "src/test/java/org/elasticsearch/search/aggregations/RandomTests.java", "status": "modified" } ] }
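Why the one-line deepCopyOf matters can be shown without any Elasticsearch machinery: a BytesRef is only a view onto a shared byte[] page, so once that page is recycled (the improved MockPageCacheRecycler now deliberately scrambles released pages) the term silently changes. A small self-contained demo against the Lucene BytesRef API:

```java
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.lucene.util.BytesRef;

public final class BytesRefDeepCopyDemo {
    public static void main(String[] args) {
        byte[] page = "photography".getBytes(StandardCharsets.UTF_8);

        BytesRef view = new BytesRef(page, 0, page.length); // shares the page
        BytesRef copy = BytesRef.deepCopyOf(view);          // owns its own bytes

        Arrays.fill(page, (byte) '?'); // simulate the recycler reusing/scrambling the page

        System.out.println(view.utf8ToString()); // "???????????" - the view was clobbered
        System.out.println(copy.utf8ToString()); // "photography" - the copy is unaffected
    }
}
```

This is exactly the situation of StringTerms.Bucket.termBytes before the fix: it pointed into pages owned by the BytesRefHash, which are released (and, under the mock recycler, overwritten) before the buckets are returned.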
{ "body": "When an analysis plugins provides default index settings using `PreBuiltAnalyzerProviderFactory`, `PreBuiltTokenFilterFactoryFactory`, `PreBuiltTokenizerFactoryFactory` or `PreBuiltCharFilterFactoryFactory` it fails when upgrading it with elasticsearch superior or equal to 0.90.5.\n\nRelated issue: #4936 \n\nFix is needed in core. But, in the meantime, analysis plugins developers can fix that issue by overloading default prebuilt factories.\n\nFor example:\n\n``` java\npublic class StempelAnalyzerProviderFactory extends PreBuiltAnalyzerProviderFactory {\n\n private final PreBuiltAnalyzerProvider analyzerProvider;\n\n public StempelAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyzer analyzer) {\n super(name, scope, analyzer);\n analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer);\n }\n\n @Override\n public AnalyzerProvider create(String name, Settings settings) {\n return analyzerProvider;\n }\n\n public Analyzer analyzer() {\n return analyzerProvider.get();\n }\n}\n```\n\nAnd instead of:\n\n``` java\n @Inject\n public PolishIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {\n super(settings);\n indicesAnalysisService.analyzerProviderFactories().put(\"polish\", new PreBuiltAnalyzerProviderFactory(\"polish\", AnalyzerScope.INDICES, new PolishAnalyzer(Lucene.ANALYZER_VERSION)));\n }\n```\n\ndo \n\n``` java\n @Inject\n public PolishIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {\n super(settings);\n indicesAnalysisService.analyzerProviderFactories().put(\"polish\", new StempelAnalyzerProviderFactory(\"polish\", AnalyzerScope.INDICES, new PolishAnalyzer(Lucene.ANALYZER_VERSION)));\n }\n```\n", "comments": [], "number": 5030, "title": "Upgrading analysis plugins fails" }
{ "body": "When an analysis plugins provides default index settings using `PreBuiltAnalyzerProviderFactory`, `PreBuiltTokenFilterFactoryFactory`, `PreBuiltTokenizerFactoryFactory` or `PreBuiltCharFilterFactoryFactory` it fails when upgrading it with elasticsearch superior or equal to 0.90.5.\n\nRelated issue: #4936\n\nFix is needed in core. But, in the meantime, analysis plugins developers can fix that issue by overloading default prebuilt factories.\n\nFor example:\n\n``` java\npublic class StempelAnalyzerProviderFactory extends PreBuiltAnalyzerProviderFactory {\n\n private final PreBuiltAnalyzerProvider analyzerProvider;\n\n public StempelAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyzer analyzer) {\n super(name, scope, analyzer);\n analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer);\n }\n\n @Override\n public AnalyzerProvider create(String name, Settings settings) {\n return analyzerProvider;\n }\n\n public Analyzer analyzer() {\n return analyzerProvider.get();\n }\n}\n```\n\nAnd instead of:\n\n``` java\n @Inject\n public PolishIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {\n super(settings);\n indicesAnalysisService.analyzerProviderFactories().put(\"polish\", new PreBuiltAnalyzerProviderFactory(\"polish\", AnalyzerScope.INDICES, new PolishAnalyzer(Lucene.ANALYZER_VERSION)));\n }\n```\n\ndo\n\n``` java\n @Inject\n public PolishIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {\n super(settings);\n indicesAnalysisService.analyzerProviderFactories().put(\"polish\", new StempelAnalyzerProviderFactory(\"polish\", AnalyzerScope.INDICES, new PolishAnalyzer(Lucene.ANALYZER_VERSION)));\n }\n```\n\nCloses #5030\n", "number": 5034, "review_comments": [ { "body": "can we just delegate to `valueOf` and if it throws an exception we return the default? I don't think we should do the linear checks here\n", "created_at": "2014-02-06T09:24:43Z" }, { "body": "same as above and can you make it `public static` rather than `static public`\n", "created_at": "2014-02-06T09:25:09Z" }, { "body": "while we are on it can' we move the `name.toUpperCase(Locale.ROOT);` into the valueOf method?\n", "created_at": "2014-02-06T09:28:16Z" }, { "body": "I think we should also name it `get` or `getOrDefault`\n", "created_at": "2014-02-06T09:28:40Z" }, { "body": "this looks awesome! exactly what I expected \n\ncan you also try to add prebuild TokenFitler and Tokenizer? just for kicks\n", "created_at": "2014-02-06T17:16:08Z" }, { "body": "those license headers are old afaik\n", "created_at": "2014-02-06T17:16:35Z" }, { "body": "hmm why did you remove the mapping from here? I think that was a good change?\nyou should add the settings from from `public Settings indexSettings()` are only used if you use `prepareCreate` so you should add the settings to the versionSettings below.\n\nother than that it looks awesome \n", "created_at": "2014-02-06T19:35:13Z" }, { "body": "Actually I ran the test without the patch and without the mapping and it was failing. So I guess that this analyzer is set by default when creating the index. So we don't need to set it on a specific field to reproduce the issue.\n\nThat said I can add the mapping again.\n", "created_at": "2014-02-06T22:18:44Z" }, { "body": "About `indexSettings()`, this is true. I added to `versionSettings` and I have now an error which I need to fix. Will push a new update soon.\n", "created_at": "2014-02-07T08:19:23Z" } ], "title": "Upgrading analysis plugins fails" }
{ "commits": [ { "message": "Upgrading analysis plugins fails\n\nWhen an analysis plugins provides default index settings using `PreBuiltAnalyzerProviderFactory`, `PreBuiltTokenFilterFactoryFactory` or `PreBuiltTokenizerFactoryFactory` it fails when upgrading it with elasticsearch superior or equal to 0.90.5.\n\nRelated issue: #4936\n\nFix is needed in core. But, in the meantime, analysis plugins developers can fix that issue by overloading default prebuilt factories.\n\nFor example:\n\n```java\npublic class StempelAnalyzerProviderFactory extends PreBuiltAnalyzerProviderFactory {\n\n private final PreBuiltAnalyzerProvider analyzerProvider;\n\n public StempelAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyzer analyzer) {\n super(name, scope, analyzer);\n analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer);\n }\n\n @Override\n public AnalyzerProvider create(String name, Settings settings) {\n return analyzerProvider;\n }\n\n public Analyzer analyzer() {\n return analyzerProvider.get();\n }\n}\n```\n\nAnd instead of:\n\n```java\n @Inject\n public PolishIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {\n super(settings);\n indicesAnalysisService.analyzerProviderFactories().put(\"polish\", new PreBuiltAnalyzerProviderFactory(\"polish\", AnalyzerScope.INDICES, new PolishAnalyzer(Lucene.ANALYZER_VERSION)));\n }\n```\n\ndo\n\n```java\n @Inject\n public PolishIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {\n super(settings);\n indicesAnalysisService.analyzerProviderFactories().put(\"polish\", new StempelAnalyzerProviderFactory(\"polish\", AnalyzerScope.INDICES, new PolishAnalyzer(Lucene.ANALYZER_VERSION)));\n }\n```\n\nCloses #5030" } ], "files": [ { "diff": "@@ -25,8 +25,6 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;\n \n-import java.util.Locale;\n-\n /**\n *\n */\n@@ -42,8 +40,11 @@ public PreBuiltAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyze\n public AnalyzerProvider create(String name, Settings settings) {\n Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);\n if (!Version.CURRENT.equals(indexVersion)) {\n- Analyzer analyzer = PreBuiltAnalyzers.valueOf(name.toUpperCase(Locale.ROOT)).getAnalyzer(indexVersion);\n- return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer);\n+ PreBuiltAnalyzers preBuiltAnalyzers = PreBuiltAnalyzers.getOrDefault(name, null);\n+ if (preBuiltAnalyzers != null) {\n+ Analyzer analyzer = preBuiltAnalyzers.getAnalyzer(indexVersion);\n+ return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer);\n+ }\n }\n \n return analyzerProvider;", "filename": "src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java", "status": "modified" }, { "diff": "@@ -24,8 +24,6 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.indices.analysis.PreBuiltCharFilters;\n \n-import java.util.Locale;\n-\n public class PreBuiltCharFilterFactoryFactory implements CharFilterFactoryFactory {\n \n private final CharFilterFactory charFilterFactory;\n@@ -38,9 +36,12 @@ public PreBuiltCharFilterFactoryFactory(CharFilterFactory charFilterFactory) {\n public CharFilterFactory create(String name, Settings settings) {\n Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);\n if (!Version.CURRENT.equals(indexVersion)) {\n- return 
PreBuiltCharFilters.valueOf(name.toUpperCase(Locale.ROOT)).getCharFilterFactory(indexVersion);\n+ PreBuiltCharFilters preBuiltCharFilters = PreBuiltCharFilters.getOrDefault(name, null);\n+ if (preBuiltCharFilters != null) {\n+ return preBuiltCharFilters.getCharFilterFactory(indexVersion);\n+ }\n }\n \n return charFilterFactory;\n }\n-}\n\\ No newline at end of file\n+}", "filename": "src/main/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactory.java", "status": "modified" }, { "diff": "@@ -24,8 +24,6 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;\n \n-import java.util.Locale;\n-\n public class PreBuiltTokenFilterFactoryFactory implements TokenFilterFactoryFactory {\n \n private final TokenFilterFactory tokenFilterFactory;\n@@ -38,8 +36,11 @@ public PreBuiltTokenFilterFactoryFactory(TokenFilterFactory tokenFilterFactory)\n public TokenFilterFactory create(String name, Settings settings) {\n Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);\n if (!Version.CURRENT.equals(indexVersion)) {\n- return PreBuiltTokenFilters.valueOf(name.toUpperCase(Locale.ROOT)).getTokenFilterFactory(indexVersion);\n+ PreBuiltTokenFilters preBuiltTokenFilters = PreBuiltTokenFilters.getOrDefault(name, null);\n+ if (preBuiltTokenFilters != null) {\n+ return preBuiltTokenFilters.getTokenFilterFactory(indexVersion);\n+ }\n }\n return tokenFilterFactory;\n }\n-}\n\\ No newline at end of file\n+}", "filename": "src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactory.java", "status": "modified" }, { "diff": "@@ -24,8 +24,6 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.indices.analysis.PreBuiltTokenizers;\n \n-import java.util.Locale;\n-\n public class PreBuiltTokenizerFactoryFactory implements TokenizerFactoryFactory {\n \n private final TokenizerFactory tokenizerFactory;\n@@ -38,10 +36,12 @@ public PreBuiltTokenizerFactoryFactory(TokenizerFactory tokenizerFactory) {\n public TokenizerFactory create(String name, Settings settings) {\n Version indexVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);\n if (!Version.CURRENT.equals(indexVersion)) {\n- TokenizerFactory versionedTokenizerFactory = PreBuiltTokenizers.valueOf(name.toUpperCase(Locale.ROOT)).getTokenizerFactory(indexVersion);\n- return versionedTokenizerFactory;\n+ PreBuiltTokenizers preBuiltTokenizers = PreBuiltTokenizers.getOrDefault(name, null);\n+ if (preBuiltTokenizers != null) {\n+ return preBuiltTokenizers.getTokenizerFactory(indexVersion);\n+ }\n }\n \n return tokenizerFactory;\n }\n-}\n\\ No newline at end of file\n+}", "filename": "src/main/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactory.java", "status": "modified" }, { "diff": "@@ -65,6 +65,8 @@\n import org.elasticsearch.index.analysis.StandardHtmlStripAnalyzer;\n import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;\n \n+import java.util.Locale;\n+\n /**\n *\n */\n@@ -401,4 +403,17 @@ public synchronized Analyzer getAnalyzer(Version version) {\n return analyzer;\n }\n \n+ /**\n+ * Get a pre built Analyzer by its name or fallback to the default one\n+ * @param name Analyzer name\n+ * @param defaultAnalyzer default Analyzer if name not found\n+ */\n+ public static PreBuiltAnalyzers getOrDefault(String name, PreBuiltAnalyzers defaultAnalyzer) {\n+ try {\n+ return valueOf(name.toUpperCase(Locale.ROOT));\n+ } catch 
(IllegalArgumentException e) {\n+ return defaultAnalyzer;\n+ }\n+ }\n+\n }", "filename": "src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java", "status": "modified" }, { "diff": "@@ -67,4 +67,17 @@ public Reader create(Reader tokenStream) {\n \n return charFilterFactory;\n }\n+\n+ /**\n+ * Get a pre built CharFilter by its name or fallback to the default one\n+ * @param name CharFilter name\n+ * @param defaultCharFilter default CharFilter if name not found\n+ */\n+ public static PreBuiltCharFilters getOrDefault(String name, PreBuiltCharFilters defaultCharFilter) {\n+ try {\n+ return valueOf(name.toUpperCase(Locale.ROOT));\n+ } catch (IllegalArgumentException e) {\n+ return defaultCharFilter;\n+ }\n+ }\n }", "filename": "src/main/java/org/elasticsearch/indices/analysis/PreBuiltCharFilters.java", "status": "modified" }, { "diff": "@@ -309,4 +309,16 @@ public TokenStream create(TokenStream tokenStream) {\n return factory;\n }\n \n+ /**\n+ * Get a pre built TokenFilter by its name or fallback to the default one\n+ * @param name TokenFilter name\n+ * @param defaultTokenFilter default TokenFilter if name not found\n+ */\n+ public static PreBuiltTokenFilters getOrDefault(String name, PreBuiltTokenFilters defaultTokenFilter) {\n+ try {\n+ return valueOf(name.toUpperCase(Locale.ROOT));\n+ } catch (IllegalArgumentException e) {\n+ return defaultTokenFilter;\n+ }\n+ }\n }", "filename": "src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenFilters.java", "status": "modified" }, { "diff": "@@ -151,4 +151,16 @@ public Tokenizer create(Reader reader) {\n return tokenizerFactory;\n }\n \n+ /**\n+ * Get a pre built Tokenizer by its name or fallback to the default one\n+ * @param name Tokenizer name\n+ * @param defaultTokenizer default Tokenizer if name not found\n+ */\n+ public static PreBuiltTokenizers getOrDefault(String name, PreBuiltTokenizers defaultTokenizer) {\n+ try {\n+ return valueOf(name.toUpperCase(Locale.ROOT));\n+ } catch (IllegalArgumentException e) {\n+ return defaultTokenizer;\n+ }\n+ }\n }", "filename": "src/main/java/org/elasticsearch/indices/analysis/PreBuiltTokenizers.java", "status": "modified" }, { "diff": "@@ -0,0 +1,47 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.elasticsearch.index.analysis.AnalysisModule;\n+\n+/**\n+ */\n+public class DummyAnalysisBinderProcessor extends AnalysisModule.AnalysisBinderProcessor {\n+\n+ @Override\n+ public void processAnalyzers(AnalyzersBindings analyzersBindings) {\n+ analyzersBindings.processAnalyzer(\"dummy\", DummyAnalyzerProvider.class);\n+ }\n+\n+ @Override\n+ public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {\n+ tokenFiltersBindings.processTokenFilter(\"dummy_token_filter\", DummyTokenFilterFactory.class);\n+ }\n+\n+ @Override\n+ public void processTokenizers(TokenizersBindings tokenizersBindings) {\n+ tokenizersBindings.processTokenizer(\"dummy_tokenizer\", DummyTokenizerFactory.class);\n+ }\n+\n+ @Override\n+ public void processCharFilters(CharFiltersBindings charFiltersBindings) {\n+ charFiltersBindings.processCharFilter(\"dummy_char_filter\", DummyCharFilterFactory.class);\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java", "status": "added" }, { "diff": "@@ -0,0 +1,55 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import com.google.common.collect.ImmutableList;\n+import org.elasticsearch.common.inject.Module;\n+import org.elasticsearch.index.analysis.AnalysisModule;\n+import org.elasticsearch.plugins.AbstractPlugin;\n+\n+import java.util.Collection;\n+\n+public class DummyAnalysisPlugin extends AbstractPlugin {\n+ /**\n+ * The name of the plugin.\n+ */\n+ @Override\n+ public String name() {\n+ return \"analysis-dummy\";\n+ }\n+\n+ /**\n+ * The description of the plugin.\n+ */\n+ @Override\n+ public String description() {\n+ return \"Analysis Dummy Plugin\";\n+ }\n+\n+ @Override\n+ public Collection<Class<? extends Module>> modules() {\n+ return ImmutableList.<Class<? extends Module>>of(DummyIndicesAnalysisModule.class);\n+ }\n+\n+ public void onModule(AnalysisModule module) {\n+ module.addProcessor(new DummyAnalysisBinderProcessor());\n+ }\n+\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java", "status": "added" }, { "diff": "@@ -0,0 +1,37 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. 
Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.apache.lucene.analysis.util.StopwordAnalyzerBase;\n+import org.apache.lucene.util.Version;\n+\n+import java.io.Reader;\n+\n+public class DummyAnalyzer extends StopwordAnalyzerBase {\n+\n+ protected DummyAnalyzer(Version version) {\n+ super(version);\n+ }\n+\n+ @Override\n+ protected TokenStreamComponents createComponents(String fieldName, Reader reader) {\n+ return null;\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java", "status": "added" }, { "diff": "@@ -0,0 +1,41 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.elasticsearch.common.lucene.Lucene;\n+import org.elasticsearch.index.analysis.AnalyzerProvider;\n+import org.elasticsearch.index.analysis.AnalyzerScope;\n+\n+public class DummyAnalyzerProvider implements AnalyzerProvider<DummyAnalyzer> {\n+ @Override\n+ public String name() {\n+ return \"dummy\";\n+ }\n+\n+ @Override\n+ public AnalyzerScope scope() {\n+ return AnalyzerScope.INDICES;\n+ }\n+\n+ @Override\n+ public DummyAnalyzer get() {\n+ return new DummyAnalyzer(Lucene.ANALYZER_VERSION);\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java", "status": "added" }, { "diff": "@@ -0,0 +1,36 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.elasticsearch.index.analysis.CharFilterFactory;\n+\n+import java.io.Reader;\n+\n+public class DummyCharFilterFactory implements CharFilterFactory {\n+ @Override\n+ public String name() {\n+ return \"dummy_char_filter\";\n+ }\n+\n+ @Override\n+ public Reader create(Reader reader) {\n+ return null;\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java", "status": "added" }, { "diff": "@@ -0,0 +1,43 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.elasticsearch.common.component.AbstractComponent;\n+import org.elasticsearch.common.inject.Inject;\n+import org.elasticsearch.common.lucene.Lucene;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.analysis.*;\n+\n+public class DummyIndicesAnalysis extends AbstractComponent {\n+\n+ @Inject\n+ public DummyIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {\n+ super(settings);\n+ indicesAnalysisService.analyzerProviderFactories().put(\"dummy\",\n+ new PreBuiltAnalyzerProviderFactory(\"dummy\", AnalyzerScope.INDICES,\n+ new DummyAnalyzer(Lucene.ANALYZER_VERSION)));\n+ indicesAnalysisService.tokenFilterFactories().put(\"dummy_token_filter\",\n+ new PreBuiltTokenFilterFactoryFactory(new DummyTokenFilterFactory()));\n+ indicesAnalysisService.charFilterFactories().put(\"dummy_char_filter\",\n+ new PreBuiltCharFilterFactoryFactory(new DummyCharFilterFactory()));\n+ indicesAnalysisService.tokenizerFactories().put(\"dummy_tokenizer\",\n+ new PreBuiltTokenizerFactoryFactory(new DummyTokenizerFactory()));\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java", "status": "added" }, { "diff": "@@ -0,0 +1,30 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.elasticsearch.common.inject.AbstractModule;\n+\n+public class DummyIndicesAnalysisModule extends AbstractModule {\n+\n+ @Override\n+ protected void configure() {\n+ bind(DummyIndicesAnalysis.class).asEagerSingleton();\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java", "status": "added" }, { "diff": "@@ -0,0 +1,33 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.apache.lucene.analysis.TokenStream;\n+import org.elasticsearch.index.analysis.TokenFilterFactory;\n+\n+public class DummyTokenFilterFactory implements TokenFilterFactory {\n+ @Override public String name() {\n+ return \"dummy_token_filter\";\n+ }\n+\n+ @Override public TokenStream create(TokenStream tokenStream) {\n+ return null;\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java", "status": "added" }, { "diff": "@@ -0,0 +1,37 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.indices.analysis;\n+\n+import org.apache.lucene.analysis.Tokenizer;\n+import org.elasticsearch.index.analysis.TokenizerFactory;\n+\n+import java.io.Reader;\n+\n+public class DummyTokenizerFactory implements TokenizerFactory {\n+ @Override\n+ public String name() {\n+ return \"dummy_tokenizer\";\n+ }\n+\n+ @Override\n+ public Tokenizer create(Reader reader) {\n+ return null;\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java", "status": "added" }, { "diff": "@@ -16,6 +16,7 @@\n * specific language governing permissions and limitations\n * under the License.\n */\n+\n package org.elasticsearch.indices.analysis;\n \n import com.google.common.collect.Lists;\n@@ -41,8 +42,17 @@\n /**\n *\n */\n+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)\n public class PreBuiltAnalyzerIntegrationTests extends ElasticsearchIntegrationTest {\n \n+ @Override\n+ protected Settings nodeSettings(int nodeOrdinal) {\n+ return ImmutableSettings.settingsBuilder()\n+ .put(\"plugin.types\", DummyAnalysisPlugin.class.getName())\n+ .put(super.nodeSettings(nodeOrdinal))\n+ .build();\n+ }\n+\n @Test\n public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception {\n Map<PreBuiltAnalyzers, List<Version>> loadedAnalyzers = Maps.newHashMap();\n@@ -108,6 +118,43 @@ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception\n assertLuceneAnalyzersAreNotClosed(loadedAnalyzers);\n }\n \n+ /**\n+ * Test case for #5030: Upgrading analysis plugins fails\n+ * See https://github.com/elasticsearch/elasticsearch/issues/5030\n+ */\n+ @Test\n+ public void testThatPluginAnalyzersCanBeUpdated() throws Exception {\n+ final XContentBuilder mapping = jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"properties\")\n+ .startObject(\"foo\")\n+ .field(\"type\", \"string\")\n+ .field(\"analyzer\", \"dummy\")\n+ .endObject()\n+ .startObject(\"bar\")\n+ .field(\"type\", \"string\")\n+ .field(\"analyzer\", \"my_dummy\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ Settings versionSettings = ImmutableSettings.builder()\n+ .put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion())\n+ .put(\"index.analysis.analyzer.my_dummy.type\", \"custom\")\n+ .put(\"index.analysis.analyzer.my_dummy.filter\", \"my_dummy_token_filter\")\n+ .put(\"index.analysis.analyzer.my_dummy.char_filter\", \"my_dummy_char_filter\")\n+ .put(\"index.analysis.analyzer.my_dummy.tokenizer\", \"my_dummy_tokenizer\")\n+ .put(\"index.analysis.tokenizer.my_dummy_tokenizer.type\", \"dummy_tokenizer\")\n+ .put(\"index.analysis.filter.my_dummy_token_filter.type\", \"dummy_token_filter\")\n+ .put(\"index.analysis.char_filter.my_dummy_char_filter.type\", \"dummy_char_filter\")\n+ .build();\n+\n+ client().admin().indices().prepareCreate(\"test-analysis-dummy\").addMapping(\"type\", mapping).setSettings(versionSettings).get();\n+\n+ ensureGreen();\n+ }\n+\n private void assertThatAnalyzersHaveBeenLoaded(Map<PreBuiltAnalyzers, List<Version>> expectedLoadedAnalyzers) {\n for (Map.Entry<PreBuiltAnalyzers, List<Version>> entry : expectedLoadedAnalyzers.entrySet()) {\n for (Version version : entry.getValue()) {", "filename": "src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java", "status": "modified" } ] }
{ "body": "In #5007 Simon noticed that some options can't be provided in camel case although they should.\n", "comments": [ { "body": "yeah I guess we should try to keep consistency so +1\n", "created_at": "2014-02-04T16:04:24Z" } ], "number": 5009, "title": "Camel-case support in aggregations" }
{ "body": "Close #5009\n", "number": 5010, "review_comments": [ { "body": "Maybe you should use `CharSequence` here instead as the param type?\n", "created_at": "2014-02-06T10:08:18Z" }, { "body": "add `assertEqualsIgnoreCase(s, new String(s));` :)\n", "created_at": "2014-02-06T10:09:52Z" }, { "body": "also a loop with \n\n``` Java\nfinal String s = randomRealisticUnicodeOfCodepointLength(10);\nassertEqualsIgnoreCase(s, s.toUpperCase(Locale.ROOT));\n```\n\nmaybe?\n", "created_at": "2014-02-06T10:10:49Z" }, { "body": "hmm general question, why do we not use `\"script_values_unique\".equalsIgnoreCase(currentFieldName)`\n", "created_at": "2014-02-06T10:12:20Z" } ], "title": "Ignore case when parsing `script_values_sorted|unique` in aggregations." }
{ "commits": [ { "message": "Ignore case when parsing `script_values_sorted|unique` in aggregations.\n\nClose #5009" } ], "files": [ { "diff": "@@ -131,7 +131,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n if (\"keyed\".equals(currentFieldName)) {\n keyed = parser.booleanValue();\n- } else if (\"script_values_sorted\".equals(currentFieldName)) {\n+ } else if (\"script_values_sorted\".equals(currentFieldName) || \"scriptValuesSorted\".equals(currentFieldName)) {\n assumeSorted = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java", "status": "modified" }, { "diff": "@@ -88,7 +88,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n if (\"keyed\".equals(currentFieldName)) {\n keyed = parser.booleanValue();\n- } else if (\"script_values_sorted\".equals(currentFieldName)) {\n+ } else if (\"script_values_sorted\".equals(currentFieldName) || \"scriptValuesSorted\".equals(currentFieldName)) {\n assumeSorted = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in aggregation [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java", "status": "modified" }, { "diff": "@@ -115,7 +115,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n if (\"keyed\".equals(currentFieldName)) {\n keyed = parser.booleanValue();\n- } else if (\"script_values_sorted\".equals(currentFieldName)) {\n+ } else if (\"script_values_sorted\".equals(currentFieldName) || \"scriptValuesSorted\".equals(currentFieldName)) {\n assumeSorted = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java", "status": "modified" }, { "diff": "@@ -125,7 +125,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n if (\"keyed\".equals(currentFieldName)) {\n keyed = parser.booleanValue();\n- } else if (\"script_values_sorted\".equals(currentFieldName)) {\n+ } else if (\"script_values_sorted\".equals(currentFieldName) || \"scriptValuesSorted\".equals(currentFieldName)) {\n assumeSorted = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java", "status": "modified" }, { "diff": "@@ -127,7 +127,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n if (\"keyed\".equals(currentFieldName)) {\n keyed = parser.booleanValue();\n- } else if (\"script_values_sorted\".equals(currentFieldName)) {\n+ } else if 
(\"script_values_sorted\".equals(currentFieldName) || \"scriptValuesSorted\".equals(currentFieldName)) {\n assumeSorted = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java", "status": "modified" }, { "diff": "@@ -101,7 +101,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");\n }\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n- if (\"script_values_unique\".equals(currentFieldName)) {\n+ if (\"script_values_unique\".equals(currentFieldName) || \"scriptValuesUnique\".equals(currentFieldName)) {\n assumeUnique = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParser.java", "status": "modified" }, { "diff": "@@ -74,7 +74,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");\n }\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n- if (\"script_values_sorted\".equals(currentFieldName)) {\n+ if (\"script_values_sorted\".equals(currentFieldName) || \"scriptValuesSorted\".equals(currentFieldName)) {\n assumeSorted = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/metrics/ValuesSourceMetricsAggregatorParser.java", "status": "modified" }, { "diff": "@@ -69,7 +69,7 @@ public AggregatorFactory parse(String aggregationName, XContentParser parser, Se\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");\n }\n } else if (token == XContentParser.Token.VALUE_BOOLEAN) {\n- if (\"script_values_unique\".equals(currentFieldName)) {\n+ if (\"script_values_unique\".equals(currentFieldName) || \"scriptValuesUnique\".equals(currentFieldName)) {\n assumeUnique = parser.booleanValue();\n } else {\n throw new SearchParseException(context, \"Unknown key for a \" + token + \" in [\" + aggregationName + \"]: [\" + currentFieldName + \"].\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java", "status": "modified" } ] }
{ "body": "`ScriptBytesValues.currentValueHash` doesn't return the hash value of the last returned term. The reason is that it has its own `BytesRef` (`ScriptDocValues.scratch`) to store the term while the hash code is computed on the parent's term (`BytesValues.scratch`).\n", "comments": [ { "body": "Damned! good catch\n", "created_at": "2014-02-04T10:50:31Z" }, { "body": "`FieldDataSource.WithScript.BytesValues` has the same issue.\n", "created_at": "2014-02-04T11:11:51Z" } ], "number": 5004, "title": "ScriptBytesValues.currentValueHash is wrong" }
{ "body": "This commit removes FilterBytesValues which is very trappy as the default\nimplementation forwards all method calls to the delegate. So if you do any\nnon-trivial modification to the terms or to the order of the terms, you need\nto remember to override currentValueHash, copyShared, and this is very\nerror-prone.\n\nFieldDataSource.WithScript.BytesValues and ScriptBytesValues now return correct\nhash codes, future bugs here would be catched by the new assertion in\nSortedUniqueBytesValues.\n\nThis bug was causing performance issues with scripts as all terms were assumed\nto have the same hash code.\n\nClose #5004\n", "number": 5006, "review_comments": [], "title": "Fix hashCode values of aggregations' BytesValues." }
{ "commits": [ { "message": "Fix hashCode values of aggregations' BytesValues.\n\nThis commit removes FilterBytesValues which is very trappy as the default\nimplementation forwards all method calls to the delegate. So if you do any\nnon-trivial modification to the terms or to the order of the terms, you need\nto remember to override currentValueHash, copyShared, and this is very\nerror-prone.\n\nFieldDataSource.WithScript.BytesValues and ScriptBytesValues now return correct\nhash codes, future bugs here would be catched by the new assertion in\nSortedUniqueBytesValues.\n\nThis bug was causing performance issues with scripts as all terms were assumed\nto have the same hash code.\n\nClose #5004" }, { "message": "Add dedicated tests." } ], "files": [ { "diff": "@@ -287,31 +287,34 @@ public org.elasticsearch.index.fielddata.BytesValues bytesValues() {\n return bytesValues;\n }\n \n- static class SortedUniqueBytesValues extends FilterBytesValues {\n+ static class SortedUniqueBytesValues extends BytesValues {\n \n- final BytesRef spare;\n+ final BytesValues delegate;\n int[] sortedIds;\n final BytesRefHash bytes;\n int numUniqueValues;\n int pos = Integer.MAX_VALUE;\n \n public SortedUniqueBytesValues(BytesValues delegate) {\n- super(delegate);\n+ super(delegate.isMultiValued());\n+ this.delegate = delegate;\n bytes = new BytesRefHash();\n- spare = new BytesRef();\n }\n \n @Override\n public int setDocument(int docId) {\n- final int numValues = super.setDocument(docId);\n+ final int numValues = delegate.setDocument(docId);\n if (numValues == 0) {\n sortedIds = null;\n return 0;\n }\n bytes.clear();\n bytes.reinit();\n for (int i = 0; i < numValues; ++i) {\n- bytes.add(super.nextValue(), super.currentValueHash());\n+ final BytesRef next = delegate.nextValue();\n+ final int hash = delegate.currentValueHash();\n+ assert hash == next.hashCode();\n+ bytes.add(next, hash);\n }\n numUniqueValues = bytes.size();\n sortedIds = bytes.sort(BytesRef.getUTF8SortedAsUnicodeComparator());\n@@ -321,13 +324,8 @@ public int setDocument(int docId) {\n \n @Override\n public BytesRef nextValue() {\n- bytes.get(sortedIds[pos++], spare);\n- return spare;\n- }\n-\n- @Override\n- public int currentValueHash() {\n- return spare.hashCode();\n+ bytes.get(sortedIds[pos++], scratch);\n+ return scratch;\n }\n \n @Override\n@@ -738,13 +736,11 @@ static class BytesValues extends org.elasticsearch.index.fielddata.BytesValues {\n \n private final FieldDataSource source;\n private final SearchScript script;\n- private final BytesRef scratch;\n \n public BytesValues(FieldDataSource source, SearchScript script) {\n super(true);\n this.source = source;\n this.script = script;\n- scratch = new BytesRef();\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/search/aggregations/support/FieldDataSource.java", "status": "modified" }, { "diff": "@@ -37,7 +37,6 @@ public class ScriptBytesValues extends BytesValues implements ScriptValues {\n \n private Iterator<?> iter;\n private Object value;\n- private BytesRef scratch = new BytesRef();\n \n public ScriptBytesValues(SearchScript script) {\n super(true); // assume multi-valued", "filename": "src/main/java/org/elasticsearch/search/aggregations/support/bytes/ScriptBytesValues.java", "status": "modified" }, { "diff": "@@ -0,0 +1,141 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. 
Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.search.aggregations.support;\n+\n+import org.apache.lucene.index.AtomicReaderContext;\n+import org.apache.lucene.search.Scorer;\n+import org.apache.lucene.util.BytesRef;\n+import org.elasticsearch.index.fielddata.BytesValues;\n+import org.elasticsearch.script.SearchScript;\n+import org.elasticsearch.test.ElasticsearchTestCase;\n+import org.junit.Test;\n+\n+import java.util.Map;\n+\n+public class FieldDataSourceTests extends ElasticsearchTestCase {\n+\n+ private static BytesValues randomBytesValues() {\n+ final boolean multiValued = randomBoolean();\n+ return new BytesValues(multiValued) {\n+ @Override\n+ public int setDocument(int docId) {\n+ return randomInt(multiValued ? 10 : 1);\n+ }\n+ @Override\n+ public BytesRef nextValue() {\n+ scratch.copyChars(randomAsciiOfLength(10));\n+ return scratch;\n+ }\n+\n+ };\n+ }\n+\n+ private static SearchScript randomScript() {\n+ return new SearchScript() {\n+\n+ @Override\n+ public void setNextVar(String name, Object value) {\n+ }\n+\n+ @Override\n+ public Object run() {\n+ return randomAsciiOfLength(5);\n+ }\n+\n+ @Override\n+ public Object unwrap(Object value) {\n+ return value;\n+ }\n+\n+ @Override\n+ public void setNextReader(AtomicReaderContext reader) {\n+ }\n+\n+ @Override\n+ public void setScorer(Scorer scorer) {\n+ }\n+\n+ @Override\n+ public void setNextDocId(int doc) {\n+ }\n+\n+ @Override\n+ public void setNextSource(Map<String, Object> source) {\n+ }\n+\n+ @Override\n+ public void setNextScore(float score) {\n+ }\n+\n+ @Override\n+ public float runAsFloat() {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ @Override\n+ public long runAsLong() {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ @Override\n+ public double runAsDouble() {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ };\n+ }\n+\n+ private static void assertConsistent(BytesValues values) {\n+ for (int i = 0; i < 10; ++i) {\n+ final int valueCount = values.setDocument(i);\n+ for (int j = 0; j < valueCount; ++j) {\n+ final BytesRef term = values.nextValue();\n+ assertEquals(term.hashCode(), values.currentValueHash());\n+ assertTrue(term.bytesEquals(values.copyShared()));\n+ }\n+ }\n+ }\n+\n+ @Test\n+ public void bytesValuesWithScript() {\n+ final BytesValues values = randomBytesValues();\n+ FieldDataSource source = new FieldDataSource.Bytes() {\n+\n+ @Override\n+ public BytesValues bytesValues() {\n+ return values;\n+ }\n+\n+ @Override\n+ public MetaData metaData() {\n+ throw new UnsupportedOperationException();\n+ }\n+\n+ };\n+ SearchScript script = randomScript();\n+ assertConsistent(new FieldDataSource.WithScript.BytesValues(source, script));\n+ }\n+\n+ @Test\n+ public void sortedUniqueBytesValues() {\n+ assertConsistent(new FieldDataSource.Bytes.SortedAndUnique.SortedUniqueBytesValues(randomBytesValues()));\n+ }\n+\n+}", "filename": 
"src/test/java/org/elasticsearch/search/aggregations/support/FieldDataSourceTests.java", "status": "added" } ] }
{ "body": "When doing a bulk operation, if one of the items holds an invalid index name, the operation returns a top level error (HTTP error in the _bulk API or an exception in case of the Java API).\nIt's expected it will return the error as part of the bulk result, as done for other types of errors. The error should be returned for the specific item and not fail the entire operation. \n\nExample: \n\n```\n curl -XPOST \"http://localhost:9200/_bulk\" -d'\n { \"index\" : { \"_index\" : \"INVALID.NAME\", \"_type\" : \"type1\", \"_id\" : \"1\"} }\n { \"field1\" : \"value1\" }\n '\n```\n\nReturns:\n\n```\n{\n \"error\": \"InvalidIndexNameException[[INVALID.NAME] Invalid index name [INVALID.NAME], must be lowercase]\",\n \"status\": 400\n}\n```\n", "comments": [], "number": 4987, "title": "Bulk operation throws exception on invalid index name " }
{ "body": "Before a bulk request is executed, missing indices are being created by default.\nIf this fails, the whole request is failed.\n\nThis patch changes the behaviour to not fail the whole request, but rather all\nrequests using the index where the creation has failed.\n\nTODO: Need someone to review if this kind of exception handling is correct, or if there are accidentally too much exceptions being ignored.\nTODO: Need someone to review if never failing a bulk request in the `onFailure()` method is ok. I cant think of exceptions, but I think they might exist :-)\n\nCloses #4987\n", "number": 4995, "review_comments": [ { "body": "Add another else with an exception to be future proof?\n", "created_at": "2014-02-03T15:52:31Z" }, { "body": "Unsure of the gain I get here. I would need to catch it again somewhere, as I dont want to fail the whole request?\n", "created_at": "2014-02-03T16:04:01Z" }, { "body": "it's only as a safe guard for the future, or a warning for someone using totally the wrong classes. I think it's OK in these cases to fail the entire request? (unlink non-programmatic errors) \n", "created_at": "2014-02-03T16:31:47Z" }, { "body": "looks like this failed has become redundant?\n", "created_at": "2014-02-03T16:38:32Z" }, { "body": "Nit picking: indices is only used in the creation test, maybe move it's building to be under the if with autoCreateIndex.needToCheck? (slightly faster execution when auto creation is disabled)\n", "created_at": "2014-02-03T16:44:01Z" }, { "body": "Seeing the rest of the code, this pattern is all over the file - so no need to be nit picky about this one (or change it every where). Also the requests may be null if multiple indices have failed creation (maybe good to add to the test)\n", "created_at": "2014-02-03T16:48:11Z" }, { "body": "should we put an assert down here since it seems we should catch all cases?\n", "created_at": "2014-02-11T09:05:29Z" }, { "body": "can this be null?\n", "created_at": "2014-02-11T09:06:19Z" }, { "body": "can you explain why we do this now only if `autoCreateIndex.needToCheck()`\n", "created_at": "2014-02-11T09:07:16Z" }, { "body": "this code only needs to be executed, if indices need to be created (thats why the indices are extracted from there)\n", "created_at": "2014-03-07T09:06:13Z" }, { "body": "if this code is reached, `bulkRequest.requests` has already been iterated before to fill the `indices` set and thus cannot be null\n", "created_at": "2014-03-07T09:09:51Z" } ], "title": "Ensure that index specific failures do not affect whole request" }
{ "commits": [ { "message": "Bulk API: Ensure that specific failures do not affect whole request\n\nBefore a bulk request is executed, missing indices are being created by default.\nIf this fails, the whole request is failed.\n\nThis patch changes the behaviour to not fail the whole request, but rather all\nrequests using the index where the creation has failed.\n\nCloses #4987" } ], "files": [ { "diff": "@@ -22,6 +22,7 @@\n import com.google.common.collect.Lists;\n import com.google.common.collect.Maps;\n import com.google.common.collect.Sets;\n+import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.ElasticsearchParseException;\n import org.elasticsearch.ExceptionsHelper;\n import org.elasticsearch.action.ActionListener;\n@@ -88,71 +89,101 @@ public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportSe\n @Override\n protected void doExecute(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {\n final long startTime = System.currentTimeMillis();\n- Set<String> indices = Sets.newHashSet();\n- for (ActionRequest request : bulkRequest.requests) {\n- if (request instanceof IndexRequest) {\n- IndexRequest indexRequest = (IndexRequest) request;\n- if (!indices.contains(indexRequest.index())) {\n- indices.add(indexRequest.index());\n- }\n- } else if (request instanceof DeleteRequest) {\n- DeleteRequest deleteRequest = (DeleteRequest) request;\n- if (!indices.contains(deleteRequest.index())) {\n- indices.add(deleteRequest.index());\n- }\n- } else if (request instanceof UpdateRequest) {\n- UpdateRequest updateRequest = (UpdateRequest) request;\n- if (!indices.contains(updateRequest.index())) {\n- indices.add(updateRequest.index());\n+ final AtomicArray<BulkItemResponse> responses = new AtomicArray<BulkItemResponse>(bulkRequest.requests.size());\n+\n+ if (autoCreateIndex.needToCheck()) {\n+ final Set<String> indices = Sets.newHashSet();\n+ for (ActionRequest request : bulkRequest.requests) {\n+ if (request instanceof IndexRequest) {\n+ IndexRequest indexRequest = (IndexRequest) request;\n+ if (!indices.contains(indexRequest.index())) {\n+ indices.add(indexRequest.index());\n+ }\n+ } else if (request instanceof DeleteRequest) {\n+ DeleteRequest deleteRequest = (DeleteRequest) request;\n+ if (!indices.contains(deleteRequest.index())) {\n+ indices.add(deleteRequest.index());\n+ }\n+ } else if (request instanceof UpdateRequest) {\n+ UpdateRequest updateRequest = (UpdateRequest) request;\n+ if (!indices.contains(updateRequest.index())) {\n+ indices.add(updateRequest.index());\n+ }\n+ } else {\n+ throw new ElasticsearchException(\"Parsed unknown request in bulk actions: \" + request.getClass().getSimpleName());\n }\n }\n- }\n \n- if (autoCreateIndex.needToCheck()) {\n final AtomicInteger counter = new AtomicInteger(indices.size());\n- final AtomicBoolean failed = new AtomicBoolean();\n ClusterState state = clusterService.state();\n- for (String index : indices) {\n+ for (final String index : indices) {\n if (autoCreateIndex.shouldAutoCreate(index, state)) {\n createIndexAction.execute(new CreateIndexRequest(index).cause(\"auto(bulk api)\"), new ActionListener<CreateIndexResponse>() {\n @Override\n public void onResponse(CreateIndexResponse result) {\n if (counter.decrementAndGet() == 0) {\n- executeBulk(bulkRequest, startTime, listener);\n+ executeBulk(bulkRequest, startTime, listener, responses);\n }\n }\n \n @Override\n public void onFailure(Throwable e) {\n- if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {\n- 
// we have the index, do it\n- if (counter.decrementAndGet() == 0) {\n- executeBulk(bulkRequest, startTime, listener);\n+ if (!(ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException)) {\n+ // fail all requests involving this index, if create didnt work\n+ for (int i = 0; i < bulkRequest.requests.size(); i++) {\n+ ActionRequest request = bulkRequest.requests.get(i);\n+ if (setResponseFailureIfIndexMatches(responses, i, request, index, e)) {\n+ bulkRequest.requests.set(i, null);\n+ }\n }\n- } else if (failed.compareAndSet(false, true)) {\n- listener.onFailure(e);\n+ }\n+ if (counter.decrementAndGet() == 0) {\n+ executeBulk(bulkRequest, startTime, listener, responses);\n }\n }\n });\n } else {\n if (counter.decrementAndGet() == 0) {\n- executeBulk(bulkRequest, startTime, listener);\n+ executeBulk(bulkRequest, startTime, listener, responses);\n }\n }\n }\n } else {\n- executeBulk(bulkRequest, startTime, listener);\n+ executeBulk(bulkRequest, startTime, listener, responses);\n }\n }\n \n- private void executeBulk(final BulkRequest bulkRequest, final long startTime, final ActionListener<BulkResponse> listener) {\n+ private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, ActionRequest request, String index, Throwable e) {\n+ if (request instanceof IndexRequest) {\n+ IndexRequest indexRequest = (IndexRequest) request;\n+ if (index.equals(indexRequest.index())) {\n+ responses.set(idx, new BulkItemResponse(idx, \"index\", new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e)));\n+ return true;\n+ }\n+ } else if (request instanceof DeleteRequest) {\n+ DeleteRequest deleteRequest = (DeleteRequest) request;\n+ if (index.equals(deleteRequest.index())) {\n+ responses.set(idx, new BulkItemResponse(idx, \"index\", new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), e)));\n+ return true;\n+ }\n+ } else if (request instanceof UpdateRequest) {\n+ UpdateRequest updateRequest = (UpdateRequest) request;\n+ if (index.equals(updateRequest.index())) {\n+ responses.set(idx, new BulkItemResponse(idx, \"index\", new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), e)));\n+ return true;\n+ }\n+ } else {\n+ throw new ElasticsearchException(\"Parsed unknown request in bulk actions: \" + request.getClass().getSimpleName());\n+ }\n+ return false;\n+ }\n+\n+ private void executeBulk(final BulkRequest bulkRequest, final long startTime, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses) {\n ClusterState clusterState = clusterService.state();\n // TODO use timeout to wait here if its blocked...\n clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);\n \n MetaData metaData = clusterState.metaData();\n- final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());\n-\n for (int i = 0; i < bulkRequest.requests.size(); i++) {\n ActionRequest request = bulkRequest.requests.get(i);\n if (request instanceof IndexRequest) {", "filename": "src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java", "status": "modified" }, { "diff": "@@ -608,4 +608,21 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure)\n }\n }\n \n+ @Test // issue 4987\n+ public void testThatInvalidIndexNamesShouldNotBreakCompleteBulkRequest() {\n+ int bulkEntryCount = randomIntBetween(10, 50);\n+ BulkRequestBuilder builder = client().prepareBulk();\n+ 
boolean[] expectedFailures = new boolean[bulkEntryCount];\n+ for (int i = 0; i < bulkEntryCount; i++) {\n+ expectedFailures[i] = randomBoolean();\n+ builder.add(client().prepareIndex().setIndex(expectedFailures[i] ? \"INVALID.NAME\" : \"test\").setType(\"type1\").setId(\"1\").setSource(\"field\", 1));\n+ }\n+ BulkResponse bulkResponse = builder.get();\n+\n+ assertThat(bulkResponse.hasFailures(), is(true));\n+ assertThat(bulkResponse.getItems().length, is(bulkEntryCount));\n+ for (int i = 0; i < bulkEntryCount; i++) {\n+ assertThat(bulkResponse.getItems()[i].isFailed(), is(expectedFailures[i]));\n+ }\n+ }\n }", "filename": "src/test/java/org/elasticsearch/document/BulkTests.java", "status": "modified" } ] }
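Since the change in the record above turns an invalid index name into a per-item failure instead of a top-level exception, callers of the Java bulk API need to inspect item-level failures. A hedged usage sketch (index names, types and field values are illustrative, mirroring the test added in the diff):

```java
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;

public class BulkPartialFailureExample {

    // Only the item targeting the invalid index name is expected to fail;
    // the item targeting the valid index is indexed normally.
    static void indexWithPartialFailures(Client client) {
        BulkResponse response = client.prepareBulk()
                .add(client.prepareIndex("INVALID.NAME", "type1", "1").setSource("field", 1))
                .add(client.prepareIndex("test", "type1", "2").setSource("field", 2))
                .get();

        for (BulkItemResponse item : response.getItems()) {
            if (item.isFailed()) {
                System.out.println("item " + item.getItemId() + " failed: " + item.getFailureMessage());
            }
        }
    }
}
```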
{ "body": "@martijnvg it seems that #3822 may have introduced a regression with custom similarities as well as the non-default bm25 and drf similarities. For instance, if I were to use bm25 for a child doc field, the Top Children query will not use it. It reverts back to the default similarity. \n\nHere is a set of curl statements for reproducing:\n\n```\n# delete index\ncurl -XDELETE 'http://localhost:9200/top_children_similarity_test/?pretty=true'\n\n# create index with proper parent/child mappings\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/?pretty=true' -d '{\n \"settings\" : {\n \"index\" : {\n \"number_of_shards\" : 1,\n \"number_of_replicas\" : 0\n }\n },\n \"mappings\": {\n \"author\": {\n \"properties\": {\n \"name\": { \"type\": \"string\" }\n }\n },\n \"post\": {\n \"_parent\": { \"type\": \"author\" },\n \"properties\": {\n \"content\": { \"type\": \"string\" }\n }\n }\n }\n}'\n\n# add data\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/author/1?pretty=true' -d '{\n \"name\": \"George P. Stathis\"\n}'\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/post/1?parent=1&pretty=true' -d '{\n \"post\" : {\n \"content\": \"Lorem ipsum dolor sit amet.\"\n }\n}'\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/post/2?parent=1&pretty=true' -d '{\n \"post\" : {\n \"content\": \"Lorem ipsum dolor sit amet again!\"\n }\n}'\n\necho \" \"\necho \"Sleep for two secs to allow for indexing\"\nsleep 2\n\n# Search posts directly\necho \" \"\necho \"Run query against child docs\"\ncurl 'http://localhost:9200/top_children_similarity_test/post/_search?pretty=1' -d '{\n \"query\" : {\n \"query_string\" : {\n \"default_field\": \"content\",\n \"query\" : \"Lorem ipsum\"\n }\n }\n}' | grep '_score'\n\necho \" \"\necho \"Two docs should have matched with scores 0.61871845 and 0.53033006\"\n\n# Search with top_children\necho \" \"\necho \"Run same query as top_children query in 'sum' mode\"\ncurl 'http://localhost:9200/top_children_similarity_test/_search?pretty=1' -d '{\n \"query\": {\n \"top_children\": {\n \"type\": \"post\",\n \"query\": {\n \"query_string\": {\n \"query\": \"Lorem ipsum\"\n }\n },\n \"score\": \"sum\"\n }\n }\n}' | grep '_score'\n\necho \" \"\necho \"One parent doc should have matched with score 1.1490486 (i.e. 0.61871845 + 0.53033006)\"\nsleep 5\n\n# delete index\necho \" \"\necho \"Start over and re-index posts using BM25 similarity\"\ncurl -XDELETE 'http://localhost:9200/top_children_similarity_test/?pretty=true'\n\n# create index with proper parent/child mappings\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/?pretty=true' -d '{\n \"settings\" : {\n \"index\" : {\n \"number_of_shards\" : 1,\n \"number_of_replicas\" : 0\n }\n },\n \"mappings\": {\n \"author\": {\n \"properties\": {\n \"name\": { \"type\": \"string\" }\n }\n },\n \"post\": {\n \"_parent\": { \"type\": \"author\" },\n \"properties\": {\n \"content\": { \"type\": \"string\", \"similarity\" : \"BM25\" }\n }\n }\n }\n}'\n\n# add data\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/author/1?pretty=true' -d '{\n \"name\": \"George P. 
Stathis\"\n}'\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/post/1?parent=1&pretty=true' -d '{\n \"post\" : {\n \"content\": \"Lorem ipsum dolor sit amet.\"\n }\n}'\ncurl -XPUT 'http://localhost:9200/top_children_similarity_test/post/2?parent=1&pretty=true' -d '{\n \"post\" : {\n \"content\": \"Lorem ipsum dolor sit amet again!\"\n }\n}'\n\necho \" \"\necho \"Sleep for another two secs to allow for indexing\"\nsleep 2\n\n# Search posts directly\necho \" \"\necho \"Run query against child docs\"\ncurl 'http://localhost:9200/top_children_similarity_test/post/_search?pretty=1' -d '{\n \"query\" : {\n \"query_string\" : {\n \"default_field\": \"content\",\n \"query\" : \"Lorem ipsum\"\n }\n }\n}' | grep '_score'\n\necho \" \"\necho \"NOTE!!! Two docs should now have matched with scores 0.80081946 and 0.67905 because we are using BM25\"\n\n# Search with top_children\necho \" \"\necho \"Run same query as top_children query in 'sum' mode\"\ncurl 'http://localhost:9200/top_children_similarity_test/_search?pretty=1' -d '{\n \"query\": {\n \"top_children\": {\n \"type\": \"post\",\n \"query\": {\n \"query_string\": {\n \"query\": \"Lorem ipsum\"\n }\n },\n \"score\": \"sum\"\n }\n }\n}' | grep '_score'\n\necho \" \"\necho \"NOTE!!! One parent doc matched but with with score 1.1490486 which is the sum of the child doc scores as computed by the default similarity (i.e. 0.61871845 + 0.53033006) not BM25. With BM25, the expected parent doc score should have been 0.80081946 + 0.67905 = 1.47986946\"\n```\n\nThis affects every version from 0.90.6 all the way to 0.90.11-SNAPSHOT.\n\nA proposed fix may be to carry over the similarity configured in the IndexSearcher passed to the createWeight() methods. See https://github.com/gpstathis/elasticsearch/commit/21d4a766dcf6b6d576cf123ea95c3dde428dbe88 for an example. All `org.elasticsearch.index.search.child` package tests pass with these modifications.\n", "comments": [ { "body": "hey @gpstathis this makes a lot of sense to me though! Would you be able to open a PullRequest for this and singe the CLA so we can pull it in? It would be great to have that in the next release which is coming very soon\n", "created_at": "2014-01-31T22:32:01Z" }, { "body": "CLA is signed. Pull request #4979 is open.\n", "created_at": "2014-01-31T23:20:42Z" }, { "body": "Hey @s1monw, @martijnvg, thanks for the lightning fast turnaround! I see most 0.90.11 tickets are closed. The release must be really close? ;-)\n", "created_at": "2014-02-03T15:43:44Z" }, { "body": "@gpstathis _really_ close ;)\n", "created_at": "2014-02-03T15:44:17Z" }, { "body": ":+1: \n", "created_at": "2014-02-03T15:49:47Z" }, { "body": "yeah I guess you had a good timing @gpstathis ;)\n", "created_at": "2014-02-03T15:50:00Z" }, { "body": "https://twitter.com/elasticsearch/status/430368980122222593. Hehe. Nice!\n", "created_at": "2014-02-03T16:33:25Z" } ], "number": 4977, "title": "Parent / child queries force default similarity" }
{ "body": "Closes #4977\n", "number": 4979, "review_comments": [], "title": "Parent / child queries should work with non-default similarities" }
{ "commits": [ { "message": "Parent / child queries should work with non-default similarities" } ], "files": [ { "diff": "@@ -96,6 +96,7 @@ public Weight createWeight(IndexSearcher searcher) throws IOException {\n childQuery = rewrittenChildQuery;\n }\n IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());\n+ indexSearcher.setSimilarity(searcher.getSimilarity());\n indexSearcher.search(childQuery, collector);\n \n int remaining = collectedUids.v().size();", "filename": "src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java", "status": "modified" }, { "diff": "@@ -154,6 +154,7 @@ public Weight createWeight(IndexSearcher searcher) throws IOException {\n childQuery = rewrittenChildQuery;\n }\n IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());\n+ indexSearcher.setSimilarity(searcher.getSimilarity());\n indexSearcher.search(childQuery, collector);\n \n int size = uidToScore.v().size();", "filename": "src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java", "status": "modified" }, { "diff": "@@ -23,6 +23,7 @@\n import org.apache.lucene.index.IndexReader;\n import org.apache.lucene.index.MultiReader;\n import org.apache.lucene.search.*;\n+import org.apache.lucene.search.similarities.Similarity;\n import org.apache.lucene.util.Bits;\n import org.elasticsearch.search.internal.SearchContext;\n \n@@ -66,7 +67,9 @@ public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits accept\n try {\n if (!contains(indexReader, context)) {\n multiReader = new MultiReader(new IndexReader[]{indexReader, context.reader()}, false);\n+ Similarity similarity = searcher.getSimilarity();\n searcher = new IndexSearcher(new MultiReader(indexReader, context.reader()));\n+ searcher.setSimilarity(similarity);\n }\n weight = searcher.createNormalizedWeight(query);\n } finally {\n@@ -79,7 +82,9 @@ public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits accept\n if (!contains(indexReader, context)) {\n IndexReader multiReader = new MultiReader(new IndexReader[]{indexReader, context.reader()}, false);\n try {\n+ Similarity similarity = searcher.getSimilarity();\n searcher = new IndexSearcher(multiReader);\n+ searcher.setSimilarity(similarity);\n weight = searcher.createNormalizedWeight(query);\n } finally {\n multiReader.close();", "filename": "src/main/java/org/elasticsearch/index/search/child/DeleteByQueryWrappingFilter.java", "status": "modified" }, { "diff": "@@ -88,6 +88,7 @@ public Weight createWeight(IndexSearcher searcher) throws IOException {\n parentQuery = rewrittenParentQuery = originalParentQuery.rewrite(searcher.getIndexReader());\n }\n IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());\n+ indexSearcher.setSimilarity(searcher.getSimilarity());\n indexSearcher.search(parentQuery, collector);\n \n if (parents.v().isEmpty()) {", "filename": "src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java", "status": "modified" }, { "diff": "@@ -126,6 +126,7 @@ public Weight createWeight(IndexSearcher searcher) throws IOException {\n parentQuery = rewrittenParentQuery;\n }\n IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());\n+ indexSearcher.setSimilarity(searcher.getSimilarity());\n indexSearcher.search(parentQuery, collector);\n \n if (uidToScore.v().isEmpty()) {", "filename": "src/main/java/org/elasticsearch/index/search/child/ParentQuery.java", "status": "modified" }, { "diff": "@@ -125,6 +125,7 @@ public Weight 
createWeight(IndexSearcher searcher) throws IOException {\n }\n \n IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());\n+ indexSearcher.setSimilarity(searcher.getSimilarity());\n while (true) {\n parentDocs.v().clear();\n TopDocs topChildDocs = indexSearcher.search(childQuery, numChildDocs);", "filename": "src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java", "status": "modified" } ] }
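The record above applies the same two-line fix in several query classes: whenever a parent/child query wraps the reader in its own `IndexSearcher`, the configured similarity has to be copied over or scoring silently falls back to the default. A reduced sketch of that pattern (method and class names here are illustrative):

```java
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

import java.io.IOException;

public class InnerSearcherSimilarity {

    // A freshly constructed IndexSearcher starts with the default similarity,
    // so the outer searcher's similarity (e.g. BM25) must be carried over
    // before running the inner search.
    static void searchWithSameSimilarity(IndexSearcher searcher, Query query, Collector collector) throws IOException {
        IndexSearcher inner = new IndexSearcher(searcher.getIndexReader());
        inner.setSimilarity(searcher.getSimilarity());
        inner.search(query, collector);
    }
}
```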
{ "body": "if you run something like this today:\n\n``` json\n\"multi_match\" : {\n \"query\" : \"mastodon aragon\",\n \"fields\" : [ \"Artist\", \"Album\" ],\n \"type\":\"something_that_does_not_exists\"\n}\n```\n\nthe `multi_match` query just defaults to `bool` instead of throwing an exception which it should.\n", "comments": [], "number": 4964, "title": "`multi_match` doesn't complain it `type` is not recognized" }
{ "body": "Closes #4964\n", "number": 4971, "review_comments": [], "title": "Added exception to match and multi-match queries if passed an invalid type param" }
{ "commits": [ { "message": "Added exception if passed an invalid type param\nCloses #4964" } ], "files": [ { "diff": "@@ -92,6 +92,8 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n type = MatchQuery.Type.PHRASE;\n } else if (\"phrase_prefix\".equals(tStr) || \"phrasePrefix\".equals(currentFieldName)) {\n type = MatchQuery.Type.PHRASE_PREFIX;\n+ } else {\n+ throw new QueryParsingException(parseContext.index(), \"[match] query does not support type \" + tStr);\n }\n } else if (\"analyzer\".equals(currentFieldName)) {\n String analyzer = parser.text();", "filename": "src/main/java/org/elasticsearch/index/query/MatchQueryParser.java", "status": "modified" }, { "diff": "@@ -76,7 +76,8 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n } else if (token.isValue()) {\n extractFieldAndBoost(parseContext, parser, fieldNameWithBoosts);\n } else {\n- throw new QueryParsingException(parseContext.index(), \"[query_string] query does not support [\" + currentFieldName + \"]\");\n+ throw new QueryParsingException(parseContext.index(), \"[\" + NAME + \"] query does not support [\" + currentFieldName\n+ + \"]\");\n }\n } else if (token.isValue()) {\n if (\"query\".equals(currentFieldName)) {\n@@ -89,11 +90,13 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n type = MatchQuery.Type.PHRASE;\n } else if (\"phrase_prefix\".equals(tStr) || \"phrasePrefix\".equals(currentFieldName)) {\n type = MatchQuery.Type.PHRASE_PREFIX;\n+ } else {\n+ throw new QueryParsingException(parseContext.index(), \"[\" + NAME + \"] query does not support type \" + tStr);\n }\n } else if (\"analyzer\".equals(currentFieldName)) {\n String analyzer = parser.text();\n if (parseContext.analysisService().analyzer(analyzer) == null) {\n- throw new QueryParsingException(parseContext.index(), \"[match] analyzer [\" + parser.text() + \"] not found\");\n+ throw new QueryParsingException(parseContext.index(), \"[\"+ NAME +\"] analyzer [\" + parser.text() + \"] not found\");\n }\n multiMatchQuery.setAnalyzer(analyzer);\n } else if (\"boost\".equals(currentFieldName)) {", "filename": "src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java", "status": "modified" } ] }
{ "body": "In #4846, caching of date range filters which use `now` was disabled. However, that only makes sense as long as there is no rounding happening.\n\nFor instance, this filter does make sense to cache:\n\n```\n{\n \"range\": {\n \"date\": {\n \"gt\": \"now/d\"\n }\n }\n}\n```\n", "comments": [ { "body": "good one :)\n", "created_at": "2014-01-30T03:46:20Z" }, { "body": "It depends. Wouldn't it be just as confusing around midnight? Where just after midnight you might not see data? I agree it's much less of an issue but in certain scenarios you may still get wrong results.\n\nOn Thu, Jan 30, 2014 at 4:46 AM, uboness notifications@github.com wrote:\n\n> ## good one :)\n> \n> Reply to this email directly or view it on GitHub:\n> https://github.com/elasticsearch/elasticsearch/issues/4947#issuecomment-33658031\n", "created_at": "2014-01-30T07:01:33Z" }, { "body": "@bleskes This isn't about wrong results - it's about cache churn. The filter is cached with whatever `now` evaluates to. Typically filtering on eg `now - 1h` makes sense not to cache, because you'll never reuse that value.\n\nBut a filter with `now/d` likely will be reused. Think about this pattern when retrieving logs for the last hour in an index containing logs for the whole month:\n\n```\n\"bool\": {\n \"must\": [\n { \"range\" : {\n \"timestamp\" : {\n \"gt\" : \"now/d\" \n }\n },\n { \"range\" : {\n \"timestamp\" : {\n \"gt\" : \"now-1h\" \n }\n }\n ]\n}\n```\n\nYou want the first filter to be cached, and the second filter not to be cached.\n", "created_at": "2014-01-30T11:05:07Z" }, { "body": "Could we resolve the expression with `now` in it using the current time and make it part of the cache key?\n", "created_at": "2014-01-30T11:26:36Z" }, { "body": "That is what used to happen, but since now is different every time, the next time `now` is used the previous cache key wouldn't be used, this results in thrashing the filter cache (adding entries that never end up being used).\n", "created_at": "2014-01-30T11:28:35Z" }, { "body": "@clintongormley misunderstood then (and learned something about how our internal caching works: @imotov - the caching logic compares the term the expression resolves to)\n", "created_at": "2014-01-30T11:29:00Z" } ], "number": 4947, "title": "Cache date range filters that use `now` with rounding" }
{ "body": "The forceful no cache behaviour for range filter with now date match expression should only be active if no rounding has been specified for `now` in the date range range expression (for example: `now/d`).\n\n Closes #4947\n Relates to #4846\n", "number": 4955, "review_comments": [], "title": "Range filter no cache behaviour for `now` with rounding" }
{ "commits": [ { "message": "The forceful no cache behaviour for range filter with now date match expression should only be active if no rounding has been specified for `now` in the date range range expression (for example: `now/d`).\n\nAlso the automatic now detection in range filters is overrideable by the `_cache` option.\n\n Closes #4947\n Relates to #4846" } ], "files": [ { "diff": "@@ -56,3 +56,6 @@ If caching the *result* of the filter is desired (for example, using the\n same \"teen\" filter with ages between 10 and 20), then it is advisable to\n simply use the <<query-dsl-range-filter,range>>\n filter.\n+\n+If the `now` date math expression is used without rounding then a range numeric filter will never be cached even\n+if `_cache` is set to `true`. Also any filter that wraps this filter will never be cached.\n\\ No newline at end of file", "filename": "docs/reference/query-dsl/filters/numeric-range-filter.asciidoc", "status": "modified" }, { "diff": "@@ -54,3 +54,6 @@ already faceting or sorting by.\n \n The result of the filter is only automatically cached by default if the `execution` is set to `index`. The\n `_cache` can be set to `false` to turn it off.\n+\n+If the `now` date math expression is used without rounding then a range filter will never be cached even if `_cache` is\n+set to `true`. Also any filter that wraps this filter will never be cached.", "filename": "docs/reference/query-dsl/filters/range-filter.asciidoc", "status": "modified" }, { "diff": "@@ -347,24 +347,28 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower\n \n @Override\n public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {\n- boolean nowIsUsed = false;\n+ return rangeFilter(lowerTerm, upperTerm, includeLower, includeUpper, context, false);\n+ }\n+\n+ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context, boolean explicitCaching) {\n+ boolean cache = explicitCaching;\n Long lowerVal = null;\n Long upperVal = null;\n if (lowerTerm != null) {\n String value = convertToString(lowerTerm);\n- nowIsUsed = value.contains(\"now\");\n+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);\n lowerVal = parseToMilliseconds(value, context, false);\n }\n if (upperTerm != null) {\n String value = convertToString(upperTerm);\n- nowIsUsed = value.contains(\"now\");\n+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);\n upperVal = parseToMilliseconds(value, context, includeUpper);\n }\n \n Filter filter = NumericRangeFilter.newLongRange(\n names.indexName(), precisionStep, lowerVal, upperVal, includeLower, includeUpper\n );\n- if (nowIsUsed) {\n+ if (!cache) {\n // We don't cache range filter if `now` date expression is used and also when a compound filter wraps\n // a range filter with a `now` date expressions.\n return NoCacheFilter.wrap(filter);\n@@ -375,24 +379,28 @@ public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLow\n \n @Override\n public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {\n- boolean nowIsUsed = false;\n+ return rangeFilter(fieldData, lowerTerm, upperTerm, includeLower, includeUpper, context, false);\n+ }\n+\n+ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean 
includeLower, boolean includeUpper, @Nullable QueryParseContext context, boolean explicitCaching) {\n+ boolean cache = explicitCaching;\n Long lowerVal = null;\n Long upperVal = null;\n if (lowerTerm != null) {\n String value = convertToString(lowerTerm);\n- nowIsUsed = value.contains(\"now\");\n+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);\n lowerVal = parseToMilliseconds(value, context, false);\n }\n if (upperTerm != null) {\n String value = convertToString(upperTerm);\n- nowIsUsed = value.contains(\"now\");\n+ cache = explicitCaching || !hasNowExpressionWithNoRounding(value);\n upperVal = parseToMilliseconds(value, context, includeUpper);\n }\n \n Filter filter = NumericRangeFieldDataFilter.newLongRange(\n (IndexNumericFieldData<?>) fieldData.getForField(this), lowerVal,upperVal, includeLower, includeUpper\n );\n- if (nowIsUsed) {\n+ if (!cache) {\n // We don't cache range filter if `now` date expression is used and also when a compound filter wraps\n // a range filter with a `now` date expressions.\n return NoCacheFilter.wrap(filter);\n@@ -401,6 +409,33 @@ public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Obj\n }\n }\n \n+ private boolean hasNowExpressionWithNoRounding(String value) {\n+ int index = value.indexOf(\"now\");\n+ if (index != -1) {\n+ if (value.length() == 3) {\n+ return true;\n+ } else {\n+ int indexOfPotentialRounding = index + 3;\n+ if (indexOfPotentialRounding >= value.length()) {\n+ return true;\n+ } else {\n+ char potentialRoundingChar;\n+ do {\n+ potentialRoundingChar = value.charAt(indexOfPotentialRounding++);\n+ if (potentialRoundingChar == '/') {\n+ return false; // We found the rounding char, so we shouldn't forcefully disable caching\n+ } else if (potentialRoundingChar == ' ') {\n+ return true; // Next token in the date math expression and no rounding found, so we should not cache.\n+ }\n+ } while (indexOfPotentialRounding < value.length());\n+ return true; // Couldn't find rounding char, so we should not cache\n+ }\n+ }\n+ } else {\n+ return false;\n+ }\n+ }\n+\n @Override\n public Filter nullValueFilter() {\n if (nullValue == null) {", "filename": "src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.elasticsearch.index.cache.filter.support.CacheKeyFilter;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperService;\n+import org.elasticsearch.index.mapper.core.DateFieldMapper;\n import org.elasticsearch.index.mapper.core.NumberFieldMapper;\n \n import java.io.IOException;\n@@ -122,11 +123,17 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);\n if (smartNameFieldMappers != null) {\n if (smartNameFieldMappers.hasMapper()) {\n+ boolean explicitlyCached = cache != null && cache;\n if (execution.equals(\"index\")) {\n if (cache == null) {\n cache = true;\n }\n- filter = smartNameFieldMappers.mapper().rangeFilter(from, to, includeLower, includeUpper, parseContext);\n+ FieldMapper mapper = smartNameFieldMappers.mapper();\n+ if (mapper instanceof DateFieldMapper) {\n+ filter = ((DateFieldMapper) mapper).rangeFilter(from, to, includeLower, includeUpper, parseContext, explicitlyCached);\n+ } else {\n+ filter = mapper.rangeFilter(from, to, includeLower, includeUpper, parseContext);\n+ }\n } else if (\"fielddata\".equals(execution)) {\n if (cache == null) 
{\n cache = false;\n@@ -135,7 +142,11 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n if (!(mapper instanceof NumberFieldMapper)) {\n throw new QueryParsingException(parseContext.index(), \"[range] filter field [\" + fieldName + \"] is not a numeric type\");\n }\n- filter = ((NumberFieldMapper) mapper).rangeFilter(parseContext.fieldData(), from, to, includeLower, includeUpper, parseContext);\n+ if (mapper instanceof DateFieldMapper) {\n+ filter = ((DateFieldMapper) mapper).rangeFilter(parseContext.fieldData(), from, to, includeLower, includeUpper, parseContext, explicitlyCached);\n+ } else {\n+ filter = ((NumberFieldMapper) mapper).rangeFilter(parseContext.fieldData(), from, to, includeLower, includeUpper, parseContext);\n+ }\n } else {\n throw new QueryParsingException(parseContext.index(), \"[range] filter doesn't support [\" + execution + \"] execution\");\n }", "filename": "src/main/java/org/elasticsearch/index/query/RangeFilterParser.java", "status": "modified" }, { "diff": "@@ -153,11 +153,28 @@ public void testNoFilterParsing() throws IOException {\n assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));\n assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().size(), is(2));\n \n+ query = copyToStringFromClasspath(\"/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json\");\n+ parsedQuery = queryParser.parse(query).query();\n+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));\n+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(XBooleanFilter.class));\n+ assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().get(1).getFilter(), instanceOf(NoCacheFilter.class));\n+ assertThat(((XBooleanFilter) ((ConstantScoreQuery) parsedQuery).getFilter()).clauses().size(), is(2));\n+\n query = copyToStringFromClasspath(\"/org/elasticsearch/index/query/date_range_in_boolean_cached.json\");\n parsedQuery = queryParser.parse(query).query();\n assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));\n assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));\n \n+ query = copyToStringFromClasspath(\"/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json\");\n+ parsedQuery = queryParser.parse(query).query();\n+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));\n+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));\n+\n+ query = copyToStringFromClasspath(\"/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json\");\n+ parsedQuery = queryParser.parse(query).query();\n+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));\n+ assertThat(((ConstantScoreQuery) parsedQuery).getFilter(), instanceOf(CachedFilter.class));\n+\n try {\n SearchContext.setCurrent(new TestSearchContext());\n query = copyToStringFromClasspath(\"/org/elasticsearch/index/query/has-child.json\");", "filename": "src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterCachingTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,26 @@\n+{\n+ \"constant_score\": {\n+ \"filter\": {\n+ \"bool\": {\n+ \"_cache\" : true,\n+ \"must\": [\n+ {\n+ \"term\": {\n+ \"foo\": {\n+ \"value\": \"bar\"\n+ }\n+ }\n+ },\n+ {\n+ \"range\" : {\n+ \"born\" : {\n+ \"gte\": \"2012-01-01\",\n+ \"lte\": \"now+1m+1s\"\n+ }\n+ }\n+ }\n+ ]\n+ }\n+ }\n+ 
}\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now.json", "status": "added" }, { "diff": "@@ -0,0 +1,26 @@\n+{\n+ \"constant_score\": {\n+ \"filter\": {\n+ \"bool\": {\n+ \"_cache\" : true,\n+ \"must\": [\n+ {\n+ \"term\": {\n+ \"foo\": {\n+ \"value\": \"bar\"\n+ }\n+ }\n+ },\n+ {\n+ \"range\" : {\n+ \"born\" : {\n+ \"gte\": \"2012-01-01\",\n+ \"lte\": \"now+1m+1s/m\"\n+ }\n+ }\n+ }\n+ ]\n+ }\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_complex_now_with_rounding.json", "status": "added" }, { "diff": "@@ -0,0 +1,26 @@\n+{\n+ \"constant_score\": {\n+ \"filter\": {\n+ \"bool\": {\n+ \"_cache\" : true,\n+ \"must\": [\n+ {\n+ \"term\": {\n+ \"foo\": {\n+ \"value\": \"bar\"\n+ }\n+ }\n+ },\n+ {\n+ \"range\" : {\n+ \"born\" : {\n+ \"gte\": \"2012-01-01\",\n+ \"lte\": \"now/d\"\n+ }\n+ }\n+ }\n+ ]\n+ }\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/index/query/date_range_in_boolean_cached_now_with_rounding.json", "status": "added" }, { "diff": "@@ -2046,7 +2046,7 @@ public void testRangeFilterNoCacheWithNow() throws Exception {\n .get();\n \n SearchResponse searchResponse = client().prepareSearch(\"test\")\n- .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.rangeFilter(\"date\").from(\"2013-01-01\").to(\"now\").cache(true)))\n+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.rangeFilter(\"date\").from(\"2013-01-01\").to(\"now\")))\n .get();\n assertHitCount(searchResponse, 1l);\n \n@@ -2059,7 +2059,7 @@ public void testRangeFilterNoCacheWithNow() throws Exception {\n matchAllQuery(),\n FilterBuilders.boolFilter().cache(true)\n .must(FilterBuilders.matchAllFilter())\n- .must(FilterBuilders.rangeFilter(\"date\").from(\"2013-01-01\").to(\"now\").cache(true))\n+ .must(FilterBuilders.rangeFilter(\"date\").from(\"2013-01-01\").to(\"now\"))\n ))\n .get();\n assertHitCount(searchResponse, 1l);\n@@ -2068,19 +2068,49 @@ public void testRangeFilterNoCacheWithNow() throws Exception {\n statsResponse = client().admin().indices().prepareStats(\"test\").clear().setFilterCache(true).get();\n assertThat(statsResponse.getIndex(\"test\").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));\n \n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(QueryBuilders.filteredQuery(\n+ matchAllQuery(),\n+ FilterBuilders.boolFilter().cache(true)\n+ .must(FilterBuilders.matchAllFilter())\n+ .must(FilterBuilders.rangeFilter(\"date\").from(\"2013-01-01\").to(\"now/d\").cache(true))\n+ ))\n+ .get();\n+ assertHitCount(searchResponse, 1l);\n+ // Now with rounding is used, so we must have something in filter cache\n+ statsResponse = client().admin().indices().prepareStats(\"test\").clear().setFilterCache(true).get();\n+ long filtercacheSize = statsResponse.getIndex(\"test\").getTotal().getFilterCache().getMemorySizeInBytes();\n+ assertThat(filtercacheSize, greaterThan(0l));\n+\n searchResponse = client().prepareSearch(\"test\")\n .setQuery(QueryBuilders.filteredQuery(\n matchAllQuery(),\n FilterBuilders.boolFilter().cache(true)\n .must(FilterBuilders.termFilter(\"field\", \"value\").cache(true))\n+ .must(FilterBuilders.rangeFilter(\"date\").from(\"2013-01-01\").to(\"now\"))\n+ ))\n+ .get();\n+ assertHitCount(searchResponse, 1l);\n+\n+ // and because we use term filter, it is also added to filter cache, so it should contain more than before\n+ 
statsResponse = client().admin().indices().prepareStats(\"test\").clear().setFilterCache(true).get();\n+ assertThat(statsResponse.getIndex(\"test\").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(filtercacheSize));\n+ filtercacheSize = statsResponse.getIndex(\"test\").getTotal().getFilterCache().getMemorySizeInBytes();\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(QueryBuilders.filteredQuery(\n+ matchAllQuery(),\n+ FilterBuilders.boolFilter().cache(true)\n+ .must(FilterBuilders.matchAllFilter())\n .must(FilterBuilders.rangeFilter(\"date\").from(\"2013-01-01\").to(\"now\").cache(true))\n ))\n .get();\n assertHitCount(searchResponse, 1l);\n \n- // filter cache only has a cache entry for the term filter\n+ // The range filter is now explicitly cached, so it now it is in the filter cache.\n statsResponse = client().admin().indices().prepareStats(\"test\").clear().setFilterCache(true).get();\n- assertThat(statsResponse.getIndex(\"test\").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));\n+ assertThat(statsResponse.getIndex(\"test\").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(filtercacheSize));\n }\n \n @Test", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
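The test changes above exercise this behaviour through the 1.x Java API: a date range filter whose upper bound is a bare `now` stays out of the filter cache even inside a cached `bool` filter, while the same filter with a rounded bound (`now/d`) or an explicit `cache(true)` does end up in the cache. Below is a minimal sketch of the two calls, lifted from the test code above; only the wrapping class and method are hypothetical, and it assumes a `test` index with a `date` field already exists.

```java
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.FilterBuilders;
import org.elasticsearch.index.query.QueryBuilders;

public class RangeFilterNowCachingSketch {

    // Hypothetical wrapper; the queries themselves mirror SimpleQueryTests above.
    public static void run(Client client) {
        // Bare "now" upper bound: the range filter is not cached, even though
        // the surrounding bool filter is marked as cached.
        SearchResponse notCached = client.prepareSearch("test")
                .setQuery(QueryBuilders.filteredQuery(
                        matchAllQuery(),
                        FilterBuilders.boolFilter().cache(true)
                                .must(FilterBuilders.matchAllFilter())
                                .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now"))))
                .get();

        // Rounded "now/d" upper bound with an explicit cache(true): the bound
        // only changes once per day, so the filter may safely be cached.
        SearchResponse cached = client.prepareSearch("test")
                .setQuery(QueryBuilders.filteredQuery(
                        matchAllQuery(),
                        FilterBuilders.boolFilter().cache(true)
                                .must(FilterBuilders.matchAllFilter())
                                .must(FilterBuilders.rangeFilter("date").from("2013-01-01").to("now/d").cache(true))))
                .get();

        System.out.println(notCached.getHits().getTotalHits() + " / " + cached.getHits().getTotalHits());
    }
}
```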
{ "body": "As stated in documentation, we should support `?source=` parameter in percolate REST operations.\n\nThis is how to reproduce it:\n\n``` sh\ncurl -XDELETE \"http://localhost:9200/test\"\n\ncurl -XPUT \"http://localhost:9200/test/.percolator/1\" -d'\n{\n \"query\" : {\n \"match\" : {\n \"foo\" : \"bar\"\n }\n }\n}'\n\n# This one works\ncurl -XPOST \"http://localhost:9200/test/message/_percolate\" -d '{\n \"doc\" : {\n \"foo\" : \"bar is in foo\"\n }\n}'\n\n# This one gives: BroadcastShardOperationFailedException[[test][2] ]; nested: PercolateException[failed to percolate]; nested: ElasticsearchIllegalArgumentException[Nothing to percolate];\ncurl -XGET \"http://localhost:9200/test/message/_percolate?source=%7B%22doc%22%3A%7B%22foo%22%3A%22bar%20is%20in%20foo%22%7D%7D\"\n```\n", "comments": [], "number": 4903, "title": "percolate REST API should support source parameter" }
{ "body": "In recent changes, we added missing support for `source` parameter in some REST APIs:\n- #4892 : mget\n- #4900 : mpercolate\n- #4901 : msearch\n- #4902 : mtermvectors\n- #4903 : percolate\n\n``` java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n``` java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924.\n", "number": 4932, "review_comments": [ { "body": "hmm when can this be null? sorry I looked at it earlier but the request should never be null no?\n", "created_at": "2014-01-28T17:02:47Z" }, { "body": "Agree. I'm just paranoiac! :-)\n", "created_at": "2014-01-28T17:03:43Z" }, { "body": "YEAH so I'd put an assert in there just for kicks\n", "created_at": "2014-01-28T17:08:09Z" } ], "title": "Fix potential NPE when no source and no body" }
{ "commits": [ { "message": "Fix potential NPE when no source and no body\n\nIn recent changes, we added missing support for `source` parameter in some REST APIs:\n\n* #4892 : mget\n* #4900 : mpercolate\n* #4901 : msearch\n* #4902 : mtermvectors\n* #4903 : percolate\n\n```java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n```java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924." } ], "files": [ { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.get.MultiGetResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n import org.elasticsearch.search.fetch.source.FetchSourceContext;\n \n import java.io.IOException;\n@@ -73,18 +72,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n \n FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), content, allowExplicitIndex);\n+ multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), RestActions.getRestContent(request), allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": "src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java", "status": "modified" }, { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -66,18 +65,8 @@ public void handleRequest(final RestRequest restRequest, final RestChannel restC\n multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param(\"index\")));\n multiPercolateRequest.documentType(restRequest.param(\"type\"));\n \n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new 
BytesArray(source);\n- }\n- }\n-\n try {\n- multiPercolateRequest.add(content, restRequest.contentUnsafe(), allowExplicitIndex);\n+ multiPercolateRequest.add(RestActions.getRestContent(restRequest), restRequest.contentUnsafe(), allowExplicitIndex);\n } catch (Exception e) {\n try {\n restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java", "status": "modified" }, { "diff": "@@ -26,8 +26,6 @@\n import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n@@ -71,18 +69,7 @@ void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restReques\n percolateRequest.documentType(restRequest.param(\"type\"));\n percolateRequest.routing(restRequest.param(\"routing\"));\n percolateRequest.preference(restRequest.param(\"preference\"));\n-\n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n- percolateRequest.source(content, restRequest.contentUnsafe());\n+ percolateRequest.source(RestActions.getRestContent(restRequest), restRequest.contentUnsafe());\n \n percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions()));\n executePercolate(percolateRequest, restRequest, restChannel);", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -69,18 +68,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n String[] types = Strings.splitStringByCommaToArray(request.param(\"type\"));\n IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions());\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiSearchRequest.add(content, request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n+ multiSearchRequest.add(RestActions.getRestContent(request), request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": 
"src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java", "status": "modified" }, { "diff": "@@ -23,6 +23,8 @@\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.QuerySourceBuilder;\n import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;\n+import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentBuilderString;\n import org.elasticsearch.index.query.QueryBuilders;\n@@ -101,4 +103,25 @@ public static QuerySourceBuilder parseQuerySource(RestRequest request) {\n }\n return new QuerySourceBuilder().setQuery(queryBuilder);\n }\n+\n+ /**\n+ * Get Rest content from either payload or source parameter\n+ * @param request Rest request\n+ * @return rest content\n+ */\n+ public static BytesReference getRestContent(RestRequest request) {\n+ if (request == null) {\n+ return null;\n+ }\n+\n+ BytesReference content = request.content();\n+ if (!request.hasContent()) {\n+ String source = request.param(\"source\");\n+ if (source != null) {\n+ content = new BytesArray(source);\n+ }\n+ }\n+\n+ return content;\n+ }\n }", "filename": "src/main/java/org/elasticsearch/rest/action/support/RestActions.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.termvector.TermVectorRequest;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n import static org.elasticsearch.rest.RestRequest.Method.POST;\n@@ -61,16 +60,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n RestTermVectorAction.readURIParameters(template, request);\n multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param(\"ids\")));\n \n- BytesReference content = request.content();\n- if (!request.hasContent()) {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiTermVectorsRequest.add(template, content);\n+ multiTermVectorsRequest.add(template, RestActions.getRestContent(request));\n } catch (Throwable t) {\n try {\n channel.sendResponse(new XContentThrowableRestResponse(request, t));", "filename": "src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java", "status": "modified" } ] }
{ "body": "As stated in documentation, we should support `?source=` parameter in msearch REST operations.\n\nThis is how to reproduce it:\n\n``` sh\ncurl -XDELETE \"http://localhost:9200/test\"\n\ncurl -XPOST \"http://localhost:9200/test/type/1?refresh\" -d'{\n \"foo\": \"bar\"\n}'\n\n# This one works\ncurl -XPOST \"http://localhost:9200/test/type/_mtermvectors\" -d'\n{\n \"ids\" : [\"1\"]\n}'\n\n# This one gives: \"ActionRequestValidationException[Validation Failed: 1: multi term vectors: no documents requested;]\"\ncurl -XGET \"http://localhost:9200/test/type/_mtermvectors?source=%7B%22ids%22%3A%5B%221%22%5D%7D\"\n```\n", "comments": [], "number": 4902, "title": "mtermvectors REST API should support source parameter" }
{ "body": "In recent changes, we added missing support for `source` parameter in some REST APIs:\n- #4892 : mget\n- #4900 : mpercolate\n- #4901 : msearch\n- #4902 : mtermvectors\n- #4903 : percolate\n\n``` java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n``` java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924.\n", "number": 4932, "review_comments": [ { "body": "hmm when can this be null? sorry I looked at it earlier but the request should never be null no?\n", "created_at": "2014-01-28T17:02:47Z" }, { "body": "Agree. I'm just paranoiac! :-)\n", "created_at": "2014-01-28T17:03:43Z" }, { "body": "YEAH so I'd put an assert in there just for kicks\n", "created_at": "2014-01-28T17:08:09Z" } ], "title": "Fix potential NPE when no source and no body" }
{ "commits": [ { "message": "Fix potential NPE when no source and no body\n\nIn recent changes, we added missing support for `source` parameter in some REST APIs:\n\n* #4892 : mget\n* #4900 : mpercolate\n* #4901 : msearch\n* #4902 : mtermvectors\n* #4903 : percolate\n\n```java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n```java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924." } ], "files": [ { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.get.MultiGetResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n import org.elasticsearch.search.fetch.source.FetchSourceContext;\n \n import java.io.IOException;\n@@ -73,18 +72,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n \n FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), content, allowExplicitIndex);\n+ multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), RestActions.getRestContent(request), allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": "src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java", "status": "modified" }, { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -66,18 +65,8 @@ public void handleRequest(final RestRequest restRequest, final RestChannel restC\n multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param(\"index\")));\n multiPercolateRequest.documentType(restRequest.param(\"type\"));\n \n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new 
BytesArray(source);\n- }\n- }\n-\n try {\n- multiPercolateRequest.add(content, restRequest.contentUnsafe(), allowExplicitIndex);\n+ multiPercolateRequest.add(RestActions.getRestContent(restRequest), restRequest.contentUnsafe(), allowExplicitIndex);\n } catch (Exception e) {\n try {\n restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java", "status": "modified" }, { "diff": "@@ -26,8 +26,6 @@\n import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n@@ -71,18 +69,7 @@ void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restReques\n percolateRequest.documentType(restRequest.param(\"type\"));\n percolateRequest.routing(restRequest.param(\"routing\"));\n percolateRequest.preference(restRequest.param(\"preference\"));\n-\n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n- percolateRequest.source(content, restRequest.contentUnsafe());\n+ percolateRequest.source(RestActions.getRestContent(restRequest), restRequest.contentUnsafe());\n \n percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions()));\n executePercolate(percolateRequest, restRequest, restChannel);", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -69,18 +68,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n String[] types = Strings.splitStringByCommaToArray(request.param(\"type\"));\n IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions());\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiSearchRequest.add(content, request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n+ multiSearchRequest.add(RestActions.getRestContent(request), request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": 
"src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java", "status": "modified" }, { "diff": "@@ -23,6 +23,8 @@\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.QuerySourceBuilder;\n import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;\n+import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentBuilderString;\n import org.elasticsearch.index.query.QueryBuilders;\n@@ -101,4 +103,25 @@ public static QuerySourceBuilder parseQuerySource(RestRequest request) {\n }\n return new QuerySourceBuilder().setQuery(queryBuilder);\n }\n+\n+ /**\n+ * Get Rest content from either payload or source parameter\n+ * @param request Rest request\n+ * @return rest content\n+ */\n+ public static BytesReference getRestContent(RestRequest request) {\n+ if (request == null) {\n+ return null;\n+ }\n+\n+ BytesReference content = request.content();\n+ if (!request.hasContent()) {\n+ String source = request.param(\"source\");\n+ if (source != null) {\n+ content = new BytesArray(source);\n+ }\n+ }\n+\n+ return content;\n+ }\n }", "filename": "src/main/java/org/elasticsearch/rest/action/support/RestActions.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.termvector.TermVectorRequest;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n import static org.elasticsearch.rest.RestRequest.Method.POST;\n@@ -61,16 +60,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n RestTermVectorAction.readURIParameters(template, request);\n multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param(\"ids\")));\n \n- BytesReference content = request.content();\n- if (!request.hasContent()) {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiTermVectorsRequest.add(template, content);\n+ multiTermVectorsRequest.add(template, RestActions.getRestContent(request));\n } catch (Throwable t) {\n try {\n channel.sendResponse(new XContentThrowableRestResponse(request, t));", "filename": "src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java", "status": "modified" } ] }
{ "body": "As stated in documentation, we should support `?source=` parameter in msearch REST operations.\n\nThis is how to reproduce it:\n\n``` sh\ncurl -XDELETE \"http://localhost:9200/test\"\n\ncurl -XPOST \"http://localhost:9200/test/type/1?refresh\" -d'{\n \"foo\": \"bar\"\n}'\n\ncat requests\n{}\n{\"query\" : {\"match_all\" : {}}}\n\n# This one works\ncurl -XGET localhost:9200/_msearch --data-binary @requests\n\n# This one gives: {\"error\":\"Failed to derive xcontent from org.elasticsearch.common.bytes.BytesArray@0\"}\ncurl -XGET \"http://localhost:9200/test/type/_mget?source=%7B%7D%0A%7B%22query%22%3A%7B%22match_all%22%3A%7B%7D%7D%7D%0A\"\n```\n", "comments": [], "number": 4901, "title": "msearch REST API should support source parameter" }
{ "body": "In recent changes, we added missing support for `source` parameter in some REST APIs:\n- #4892 : mget\n- #4900 : mpercolate\n- #4901 : msearch\n- #4902 : mtermvectors\n- #4903 : percolate\n\n``` java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n``` java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924.\n", "number": 4932, "review_comments": [ { "body": "hmm when can this be null? sorry I looked at it earlier but the request should never be null no?\n", "created_at": "2014-01-28T17:02:47Z" }, { "body": "Agree. I'm just paranoiac! :-)\n", "created_at": "2014-01-28T17:03:43Z" }, { "body": "YEAH so I'd put an assert in there just for kicks\n", "created_at": "2014-01-28T17:08:09Z" } ], "title": "Fix potential NPE when no source and no body" }
{ "commits": [ { "message": "Fix potential NPE when no source and no body\n\nIn recent changes, we added missing support for `source` parameter in some REST APIs:\n\n* #4892 : mget\n* #4900 : mpercolate\n* #4901 : msearch\n* #4902 : mtermvectors\n* #4903 : percolate\n\n```java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n```java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924." } ], "files": [ { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.get.MultiGetResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n import org.elasticsearch.search.fetch.source.FetchSourceContext;\n \n import java.io.IOException;\n@@ -73,18 +72,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n \n FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), content, allowExplicitIndex);\n+ multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), RestActions.getRestContent(request), allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": "src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java", "status": "modified" }, { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -66,18 +65,8 @@ public void handleRequest(final RestRequest restRequest, final RestChannel restC\n multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param(\"index\")));\n multiPercolateRequest.documentType(restRequest.param(\"type\"));\n \n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new 
BytesArray(source);\n- }\n- }\n-\n try {\n- multiPercolateRequest.add(content, restRequest.contentUnsafe(), allowExplicitIndex);\n+ multiPercolateRequest.add(RestActions.getRestContent(restRequest), restRequest.contentUnsafe(), allowExplicitIndex);\n } catch (Exception e) {\n try {\n restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java", "status": "modified" }, { "diff": "@@ -26,8 +26,6 @@\n import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n@@ -71,18 +69,7 @@ void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restReques\n percolateRequest.documentType(restRequest.param(\"type\"));\n percolateRequest.routing(restRequest.param(\"routing\"));\n percolateRequest.preference(restRequest.param(\"preference\"));\n-\n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n- percolateRequest.source(content, restRequest.contentUnsafe());\n+ percolateRequest.source(RestActions.getRestContent(restRequest), restRequest.contentUnsafe());\n \n percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions()));\n executePercolate(percolateRequest, restRequest, restChannel);", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -69,18 +68,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n String[] types = Strings.splitStringByCommaToArray(request.param(\"type\"));\n IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions());\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiSearchRequest.add(content, request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n+ multiSearchRequest.add(RestActions.getRestContent(request), request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": 
"src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java", "status": "modified" }, { "diff": "@@ -23,6 +23,8 @@\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.QuerySourceBuilder;\n import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;\n+import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentBuilderString;\n import org.elasticsearch.index.query.QueryBuilders;\n@@ -101,4 +103,25 @@ public static QuerySourceBuilder parseQuerySource(RestRequest request) {\n }\n return new QuerySourceBuilder().setQuery(queryBuilder);\n }\n+\n+ /**\n+ * Get Rest content from either payload or source parameter\n+ * @param request Rest request\n+ * @return rest content\n+ */\n+ public static BytesReference getRestContent(RestRequest request) {\n+ if (request == null) {\n+ return null;\n+ }\n+\n+ BytesReference content = request.content();\n+ if (!request.hasContent()) {\n+ String source = request.param(\"source\");\n+ if (source != null) {\n+ content = new BytesArray(source);\n+ }\n+ }\n+\n+ return content;\n+ }\n }", "filename": "src/main/java/org/elasticsearch/rest/action/support/RestActions.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.termvector.TermVectorRequest;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n import static org.elasticsearch.rest.RestRequest.Method.POST;\n@@ -61,16 +60,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n RestTermVectorAction.readURIParameters(template, request);\n multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param(\"ids\")));\n \n- BytesReference content = request.content();\n- if (!request.hasContent()) {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiTermVectorsRequest.add(template, content);\n+ multiTermVectorsRequest.add(template, RestActions.getRestContent(request));\n } catch (Throwable t) {\n try {\n channel.sendResponse(new XContentThrowableRestResponse(request, t));", "filename": "src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java", "status": "modified" } ] }
{ "body": "As stated in documentation, we should support `?source=` parameter in mpercolate REST operations.\n\nThis is how to reproduce it:\n\n``` sh\ncurl -XDELETE \"http://localhost:9200/test\"\n\ncurl -XPUT \"http://localhost:9200/test/.percolator/1\" -d'\n{\n \"query\" : {\n \"match\" : {\n \"foo\" : \"bar\"\n }\n }\n}'\n\n# This one works\ncurl -XPOST \"http://localhost:9200/test/message/_mpercolate\" -d '\n{\"percolate\" : {}}\n{\"doc\" : {\"foo\" : \"bar is in foo\"}}\n'\n\n# This one gives: BroadcastShardOperationFailedException[[test][2] ]; nested: PercolateException[failed to percolate]; nested: ElasticsearchIllegalArgumentException[Nothing to percolate];\ncurl -XGET \"http://localhost:9200/test/message/_mpercolate?source=%7B%22percolate%22%3A%7B%7D%7D%0A%7B%22doc%22%3A%7B%22foo%22%3A%22bar is in foo%22%7D%7D%0A\"\n```\n", "comments": [], "number": 4900, "title": "mpercolate REST API should support source parameter" }
{ "body": "In recent changes, we added missing support for `source` parameter in some REST APIs:\n- #4892 : mget\n- #4900 : mpercolate\n- #4901 : msearch\n- #4902 : mtermvectors\n- #4903 : percolate\n\n``` java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n``` java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924.\n", "number": 4932, "review_comments": [ { "body": "hmm when can this be null? sorry I looked at it earlier but the request should never be null no?\n", "created_at": "2014-01-28T17:02:47Z" }, { "body": "Agree. I'm just paranoiac! :-)\n", "created_at": "2014-01-28T17:03:43Z" }, { "body": "YEAH so I'd put an assert in there just for kicks\n", "created_at": "2014-01-28T17:08:09Z" } ], "title": "Fix potential NPE when no source and no body" }
{ "commits": [ { "message": "Fix potential NPE when no source and no body\n\nIn recent changes, we added missing support for `source` parameter in some REST APIs:\n\n* #4892 : mget\n* #4900 : mpercolate\n* #4901 : msearch\n* #4902 : mtermvectors\n* #4903 : percolate\n\n```java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n```java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924." } ], "files": [ { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.get.MultiGetResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n import org.elasticsearch.search.fetch.source.FetchSourceContext;\n \n import java.io.IOException;\n@@ -73,18 +72,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n \n FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), content, allowExplicitIndex);\n+ multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), RestActions.getRestContent(request), allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": "src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java", "status": "modified" }, { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -66,18 +65,8 @@ public void handleRequest(final RestRequest restRequest, final RestChannel restC\n multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param(\"index\")));\n multiPercolateRequest.documentType(restRequest.param(\"type\"));\n \n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new 
BytesArray(source);\n- }\n- }\n-\n try {\n- multiPercolateRequest.add(content, restRequest.contentUnsafe(), allowExplicitIndex);\n+ multiPercolateRequest.add(RestActions.getRestContent(restRequest), restRequest.contentUnsafe(), allowExplicitIndex);\n } catch (Exception e) {\n try {\n restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java", "status": "modified" }, { "diff": "@@ -26,8 +26,6 @@\n import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n@@ -71,18 +69,7 @@ void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restReques\n percolateRequest.documentType(restRequest.param(\"type\"));\n percolateRequest.routing(restRequest.param(\"routing\"));\n percolateRequest.preference(restRequest.param(\"preference\"));\n-\n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n- percolateRequest.source(content, restRequest.contentUnsafe());\n+ percolateRequest.source(RestActions.getRestContent(restRequest), restRequest.contentUnsafe());\n \n percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions()));\n executePercolate(percolateRequest, restRequest, restChannel);", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -69,18 +68,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n String[] types = Strings.splitStringByCommaToArray(request.param(\"type\"));\n IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions());\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiSearchRequest.add(content, request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n+ multiSearchRequest.add(RestActions.getRestContent(request), request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": 
"src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java", "status": "modified" }, { "diff": "@@ -23,6 +23,8 @@\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.QuerySourceBuilder;\n import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;\n+import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentBuilderString;\n import org.elasticsearch.index.query.QueryBuilders;\n@@ -101,4 +103,25 @@ public static QuerySourceBuilder parseQuerySource(RestRequest request) {\n }\n return new QuerySourceBuilder().setQuery(queryBuilder);\n }\n+\n+ /**\n+ * Get Rest content from either payload or source parameter\n+ * @param request Rest request\n+ * @return rest content\n+ */\n+ public static BytesReference getRestContent(RestRequest request) {\n+ if (request == null) {\n+ return null;\n+ }\n+\n+ BytesReference content = request.content();\n+ if (!request.hasContent()) {\n+ String source = request.param(\"source\");\n+ if (source != null) {\n+ content = new BytesArray(source);\n+ }\n+ }\n+\n+ return content;\n+ }\n }", "filename": "src/main/java/org/elasticsearch/rest/action/support/RestActions.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.termvector.TermVectorRequest;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n import static org.elasticsearch.rest.RestRequest.Method.POST;\n@@ -61,16 +60,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n RestTermVectorAction.readURIParameters(template, request);\n multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param(\"ids\")));\n \n- BytesReference content = request.content();\n- if (!request.hasContent()) {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiTermVectorsRequest.add(template, content);\n+ multiTermVectorsRequest.add(template, RestActions.getRestContent(request));\n } catch (Throwable t) {\n try {\n channel.sendResponse(new XContentThrowableRestResponse(request, t));", "filename": "src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java", "status": "modified" } ] }
{ "body": "As stated in documentation, we should support `?source=` parameter in mget REST operations.\n\nThis is how to reproduce it:\n\n``` sh\ncurl -XDELETE \"http://localhost:9200/test\"\n\ncurl -XPOST \"http://localhost:9200/test/type/1?refresh\" -d'{\n \"foo\": \"bar\"\n}'\n\n# This one works\ncurl -XPOST \"http://localhost:9200/test/type/_mget\" -d'{\n \"ids\": [\"1\"]\n}'\n\n# This one gives: {\"error\":\"Failed to derive xcontent from org.elasticsearch.common.bytes.BytesArray@0\"}\ncurl -XGET \"http://localhost:9200/test/type/_mget?source=%7B%22ids%22%3A%20%5B%221%22%5D%7D\"\n```\n", "comments": [ { "body": "I just ran into this:\n\n```\nElasticsearch::Transport::Transport::Errors::BadRequest: [400] {\"error\":\"ElasticsearchParseException[Failed to derive xcontent from org.elasticsearch.common.bytes.BytesArray@1]\",\"status\":400}\ngems/elasticsearch-transport-1.0.6/lib/elasticsearch/transport/transport/base.rb:132 __raise_transport_error\ngems/elasticsearch-transport-1.0.6/lib/elasticsearch/transport/transport/base.rb:224 perform_request\ngems/elasticsearch-transport-1.0.6/lib/elasticsearch/transport/transport/http/manticore.rb:33 perform_request\ngems/elasticsearch-transport-1.0.6/lib/elasticsearch/transport/client.rb:111 perform_request\ngems/elasticsearch-api-1.0.6/lib/elasticsearch/api/actions/mget.rb:70 mget\n```\n\nI tried using:\n\n``` ruby\n :transport_class => Elasticsearch::Transport::Transport::HTTP::Manticore\n```\n\nIs this a possible regression?\n", "created_at": "2014-10-27T02:43:27Z" } ], "number": 4892, "title": "mget REST API should support source parameter" }
{ "body": "In recent changes, we added missing support for `source` parameter in some REST APIs:\n- #4892 : mget\n- #4900 : mpercolate\n- #4901 : msearch\n- #4902 : mtermvectors\n- #4903 : percolate\n\n``` java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n``` java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924.\n", "number": 4932, "review_comments": [ { "body": "hmm when can this be null? sorry I looked at it earlier but the request should never be null no?\n", "created_at": "2014-01-28T17:02:47Z" }, { "body": "Agree. I'm just paranoiac! :-)\n", "created_at": "2014-01-28T17:03:43Z" }, { "body": "YEAH so I'd put an assert in there just for kicks\n", "created_at": "2014-01-28T17:08:09Z" } ], "title": "Fix potential NPE when no source and no body" }
{ "commits": [ { "message": "Fix potential NPE when no source and no body\n\nIn recent changes, we added missing support for `source` parameter in some REST APIs:\n\n* #4892 : mget\n* #4900 : mpercolate\n* #4901 : msearch\n* #4902 : mtermvectors\n* #4903 : percolate\n\n```java\n BytesReference content = null;\n if (request.hasContent()) {\n content = request.content();\n } else {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nIt's definitely better to have:\n\n```java\n BytesReference content = request.content();\n if (!request.hasContent()) {\n String source = request.param(\"source\");\n if (source != null) {\n content = new BytesArray(source);\n }\n }\n```\n\nThat said, it could be nice to have a single method to manage it for various REST actions.\n\nCloses #4924." } ], "files": [ { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.get.MultiGetResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n import org.elasticsearch.search.fetch.source.FetchSourceContext;\n \n import java.io.IOException;\n@@ -73,18 +72,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n \n FetchSourceContext defaultFetchSource = FetchSourceContext.parseFromRestRequest(request);\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), content, allowExplicitIndex);\n+ multiGetRequest.add(request.param(\"index\"), request.param(\"type\"), sFields, defaultFetchSource, request.param(\"routing\"), RestActions.getRestContent(request), allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": "src/main/java/org/elasticsearch/rest/action/get/RestMultiGetAction.java", "status": "modified" }, { "diff": "@@ -24,12 +24,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -66,18 +65,8 @@ public void handleRequest(final RestRequest restRequest, final RestChannel restC\n multiPercolateRequest.indices(Strings.splitStringByCommaToArray(restRequest.param(\"index\")));\n multiPercolateRequest.documentType(restRequest.param(\"type\"));\n \n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new 
BytesArray(source);\n- }\n- }\n-\n try {\n- multiPercolateRequest.add(content, restRequest.contentUnsafe(), allowExplicitIndex);\n+ multiPercolateRequest.add(RestActions.getRestContent(restRequest), restRequest.contentUnsafe(), allowExplicitIndex);\n } catch (Exception e) {\n try {\n restChannel.sendResponse(new XContentThrowableRestResponse(restRequest, e));", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestMultiPercolateAction.java", "status": "modified" }, { "diff": "@@ -26,8 +26,6 @@\n import org.elasticsearch.action.support.broadcast.BroadcastOperationThreading;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n@@ -71,18 +69,7 @@ void parseDocPercolate(PercolateRequest percolateRequest, RestRequest restReques\n percolateRequest.documentType(restRequest.param(\"type\"));\n percolateRequest.routing(restRequest.param(\"routing\"));\n percolateRequest.preference(restRequest.param(\"preference\"));\n-\n- BytesReference content = null;\n- if (restRequest.hasContent()) {\n- content = restRequest.content();\n- } else {\n- String source = restRequest.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n- percolateRequest.source(content, restRequest.contentUnsafe());\n+ percolateRequest.source(RestActions.getRestContent(restRequest), restRequest.contentUnsafe());\n \n percolateRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, percolateRequest.indicesOptions()));\n executePercolate(percolateRequest, restRequest, restChannel);", "filename": "src/main/java/org/elasticsearch/rest/action/percolate/RestPercolateAction.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.support.IndicesOptions;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import java.io.IOException;\n \n@@ -69,18 +68,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n String[] types = Strings.splitStringByCommaToArray(request.param(\"type\"));\n IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions());\n \n- BytesReference content = null;\n- if (request.hasContent()) {\n- content = request.content();\n- } else {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiSearchRequest.add(content, request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n+ multiSearchRequest.add(RestActions.getRestContent(request), request.contentUnsafe(), indices, types, request.param(\"search_type\"), request.param(\"routing\"), indicesOptions, allowExplicitIndex);\n } catch (Exception e) {\n try {\n XContentBuilder builder = restContentBuilder(request);", "filename": 
"src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java", "status": "modified" }, { "diff": "@@ -23,6 +23,8 @@\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.QuerySourceBuilder;\n import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse;\n+import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentBuilderString;\n import org.elasticsearch.index.query.QueryBuilders;\n@@ -101,4 +103,25 @@ public static QuerySourceBuilder parseQuerySource(RestRequest request) {\n }\n return new QuerySourceBuilder().setQuery(queryBuilder);\n }\n+\n+ /**\n+ * Get Rest content from either payload or source parameter\n+ * @param request Rest request\n+ * @return rest content\n+ */\n+ public static BytesReference getRestContent(RestRequest request) {\n+ if (request == null) {\n+ return null;\n+ }\n+\n+ BytesReference content = request.content();\n+ if (!request.hasContent()) {\n+ String source = request.param(\"source\");\n+ if (source != null) {\n+ content = new BytesArray(source);\n+ }\n+ }\n+\n+ return content;\n+ }\n }", "filename": "src/main/java/org/elasticsearch/rest/action/support/RestActions.java", "status": "modified" }, { "diff": "@@ -25,12 +25,11 @@\n import org.elasticsearch.action.termvector.TermVectorRequest;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Strings;\n-import org.elasticsearch.common.bytes.BytesArray;\n-import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.rest.*;\n+import org.elasticsearch.rest.action.support.RestActions;\n \n import static org.elasticsearch.rest.RestRequest.Method.GET;\n import static org.elasticsearch.rest.RestRequest.Method.POST;\n@@ -61,16 +60,8 @@ public void handleRequest(final RestRequest request, final RestChannel channel)\n RestTermVectorAction.readURIParameters(template, request);\n multiTermVectorsRequest.ids(Strings.commaDelimitedListToStringArray(request.param(\"ids\")));\n \n- BytesReference content = request.content();\n- if (!request.hasContent()) {\n- String source = request.param(\"source\");\n- if (source != null) {\n- content = new BytesArray(source);\n- }\n- }\n-\n try {\n- multiTermVectorsRequest.add(template, content);\n+ multiTermVectorsRequest.add(template, RestActions.getRestContent(request));\n } catch (Throwable t) {\n try {\n channel.sendResponse(new XContentThrowableRestResponse(request, t));", "filename": "src/main/java/org/elasticsearch/rest/action/termvector/RestMultiTermVectorsAction.java", "status": "modified" } ] }
{ "body": "we currently use the number of hot threads that we are interested in as the value for iterating over the actual hot threads which can lead to AIOOB is the actual number of threads is less than the given number.\n\nwhich will result in an exception like this:\n\n```\nCaused by: java.lang.IndexOutOfBoundsException: Index: 93, Size: 93\n at java.util.ArrayList.RangeCheck(ArrayList.java:547)\n at java.util.ArrayList.get(ArrayList.java:322)\n at org.elasticsearch.monitor.jvm.HotThreads.innerDetect(HotThreads.java:149)\n at org.elasticsearch.monitor.jvm.HotThreads.detect(HotThreads.java:75)\n at org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction.nodeOperation(TransportNodesHotThreadsAction.java:101)\n ... 5 more\n```\n", "comments": [], "number": 4927, "title": "HotThreads fail with AIOOB if busiestThreads > actual threads " }
{ "body": "We currently use the number of hot threads that we are\ninterested in as the value for iterating over the actual\nhot threads which can lead to AIOOB is the actual number\nof threads is less than the given number.\n\nCloses #4927\n", "number": 4928, "review_comments": [], "title": "Use num of actual threads if busiestThreads is larger" }
{ "commits": [ { "message": "Use num of actual threads if busiestThreads is larger\n\nWe currently use the number of hot threads that we are\ninterested in as the value for iterating over the actual\nhot threads which can lead to AIOOB is the actual number\nof threads is less than the given number.\n\nCloses #4927" } ], "files": [ { "diff": "@@ -130,6 +130,7 @@ private String innerDetect() throws Exception {\n }\n // sort by delta CPU time on thread.\n List<MyThreadInfo> hotties = new ArrayList<MyThreadInfo>(threadInfos.values());\n+ final int busiestThreads = Math.min(this.busiestThreads, hotties.size());\n // skip that for now\n CollectionUtil.introSort(hotties, new Comparator<MyThreadInfo>() {\n public int compare(MyThreadInfo o1, MyThreadInfo o2) {", "filename": "src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import java.util.Map;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ExecutionException;\n+import java.util.concurrent.atomic.AtomicBoolean;\n \n import static org.elasticsearch.index.query.FilterBuilders.andFilter;\n import static org.elasticsearch.index.query.FilterBuilders.notFilter;\n@@ -37,6 +38,7 @@\n import static org.elasticsearch.index.query.QueryBuilders.termQuery;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n import static org.hamcrest.CoreMatchers.equalTo;\n+import static org.hamcrest.CoreMatchers.is;\n import static org.hamcrest.CoreMatchers.notNullValue;\n \n /**\n@@ -50,7 +52,7 @@ public void testHotThreadsDontFail() throws ExecutionException, InterruptedExcep\n */\n createIndex(\"test\");\n final int iters = atLeast(2);\n-\n+ final AtomicBoolean hasErrors = new AtomicBoolean(false);\n for (int i = 0; i < iters; i++) {\n final String type;\n NodesHotThreadsRequestBuilder nodesHotThreadsRequestBuilder = client().admin().cluster().prepareNodesHotThreads();\n@@ -59,7 +61,7 @@ public void testHotThreadsDontFail() throws ExecutionException, InterruptedExcep\n nodesHotThreadsRequestBuilder.setInterval(timeValue);\n }\n if (randomBoolean()) {\n- nodesHotThreadsRequestBuilder.setThreads(randomIntBetween(1, 100));\n+ nodesHotThreadsRequestBuilder.setThreads(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(1, 500));\n }\n if (randomBoolean()) {\n switch (randomIntBetween(0, 2)) {\n@@ -82,6 +84,7 @@ public void testHotThreadsDontFail() throws ExecutionException, InterruptedExcep\n nodesHotThreadsRequestBuilder.execute(new ActionListener<NodesHotThreadsResponse>() {\n @Override\n public void onResponse(NodesHotThreadsResponse nodeHotThreads) {\n+ boolean success = false;\n try {\n assertThat(nodeHotThreads, notNullValue());\n Map<String,NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();\n@@ -90,14 +93,19 @@ public void onResponse(NodesHotThreadsResponse nodeHotThreads) {\n assertNotNull(ht.getHotThreads());\n //logger.info(ht.getHotThreads());\n }\n+ success = true;\n } finally {\n+ if (!success) {\n+ hasErrors.set(true);\n+ }\n latch.countDown();\n }\n }\n \n @Override\n public void onFailure(Throwable e) {\n logger.error(\"FAILED\", e);\n+ hasErrors.set(true);\n latch.countDown();\n fail();\n }\n@@ -120,6 +128,7 @@ public void onFailure(Throwable e) {\n 3l);\n }\n latch.await();\n+ assertThat(hasErrors.get(), is(false));\n }\n }\n }", "filename": "src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java", "status": "modified" } ] }
{ "body": "This request:\n\n```\nGET /_cluster/state/nodes\n```\n\nshould only return the `nodes` element, but it still returns `metadata`, `routing_table` etc. They're empty, but still present.\n", "comments": [ { "body": "Good catch, the toXContent serialization still checks for the old filter_ parameters.. will fix and add tests\n", "created_at": "2014-01-24T19:13:47Z" }, { "body": "fixed, also added a bunch of yaml tests, maybe you can take a look\n", "created_at": "2014-01-25T12:33:26Z" } ], "number": 4885, "title": "Filtered cluster state still returns stub elements" }
{ "body": "In order to make sure, that only the requested data is returned to the client,\na couple of fixes have been applied in the ClusterState.toXContent() method.\nAlso some tests were added to the yaml test suite\n\nCloses #4885\n", "number": 4889, "review_comments": [], "title": "Cluster state toXContent serialization only returns needed data" }
{ "commits": [ { "message": "Cluster state toXContent serialization only returns needed data\n\nIn order to make sure, that only the requested data is returned to the client,\na couple of fixes have been applied in the ClusterState.toXContent() method.\nAlso some tests were added to the yaml test suite\n\nCloses #4885" } ], "files": [ { "diff": "@@ -16,7 +16,7 @@\n },\n \"metric\" : {\n \"type\" : \"list\",\n- \"options\" : [\"_all\", \"blocks\", \"index_templates\", \"metadata\", \"nodes\", \"routing_table\"],\n+ \"options\" : [\"_all\", \"blocks\", \"metadata\", \"nodes\", \"routing_table\"],\n \"description\" : \"Limit the information returned to the specified metrics\"\n }\n },\n@@ -29,6 +29,10 @@\n \"type\": \"time\",\n \"description\": \"Specify timeout for connection to master\"\n },\n+ \"index_templates\": {\n+ \"type\": \"list\",\n+ \"description\": \"A comma separated list to return specific index templates when returning metadata\"\n+ },\n \"flat_settings\": {\n \"type\": \"boolean\",\n \"description\": \"Return settings in flat format (default: false)\"", "filename": "rest-api-spec/api/cluster.state.json", "status": "modified" }, { "diff": "@@ -0,0 +1,164 @@\n+setup:\n+ - do:\n+ index:\n+ index: testidx\n+ type: testtype\n+ id: testing_document\n+ body:\n+ \"text\" : \"The quick brown fox is brown.\"\n+ - do:\n+ indices.refresh: {}\n+\n+---\n+\"Filtering the cluster state by blocks should return the blocks field even if the response is empty\":\n+ - do:\n+ cluster.state:\n+ metric: [ blocks ] \n+ \n+ - is_true: blocks\n+ - is_false: nodes\n+ - is_false: metadata\n+ - is_false: routing_table\n+ - is_false: routing_nodes\n+ - is_false: allocations\n+ - length: { blocks: 0 }\n+\n+---\n+\"Filtering the cluster state by blocks should return the blocks\":\n+# read only index\n+# TODO: can this cause issues leaving it read only when deleting it in teardown\n+ - do:\n+ indices.put_settings:\n+ index: testidx\n+ body:\n+ index.blocks.read_only: true\n+ - do:\n+ cluster.state:\n+ metric: [ blocks ] \n+\n+ - is_true: blocks\n+ - is_false: nodes\n+ - is_false: metadata\n+ - is_false: routing_table\n+ - is_false: routing_nodes\n+ - is_false: allocations\n+ - length: { blocks: 1 }\n+\n+---\n+\"Filtering the cluster state by nodes only should work\":\n+ - do:\n+ cluster.state:\n+ metric: [ nodes ] \n+ \n+ - is_false: blocks\n+ - is_true: nodes\n+ - is_false: metadata\n+ - is_false: routing_table\n+ - is_false: routing_nodes\n+ - is_false: allocations\n+\n+---\n+\"Filtering the cluster state by metadata only should work\":\n+ - do:\n+ cluster.state:\n+ metric: [ metadata ] \n+ \n+ - is_false: blocks\n+ - is_false: nodes\n+ - is_true: metadata\n+ - is_false: routing_table\n+ - is_false: routing_nodes\n+ - is_false: allocations\n+\n+\n+---\n+\"Filtering the cluster state by routing table only should work\":\n+ - do:\n+ cluster.state:\n+ metric: [ routing_table ] \n+ \n+ - is_false: blocks\n+ - is_false: nodes\n+ - is_false: metadata\n+ - is_true: routing_table\n+ - is_true: routing_nodes\n+ - is_true: allocations\n+\n+\n+---\n+\"Filtering the cluster state for specific index templates should work \":\n+ - do:\n+ indices.put_template:\n+ name: test1\n+ body:\n+ template: test-*\n+ settings:\n+ number_of_shards: 1\n+ \n+ - do:\n+ indices.put_template:\n+ name: test2\n+ body:\n+ template: test-*\n+ settings:\n+ number_of_shards: 2\n+\n+ - do:\n+ indices.put_template:\n+ name: foo\n+ body:\n+ template: foo-*\n+ settings:\n+ number_of_shards: 3\n+ - do:\n+ cluster.state:\n+ metric: [ metadata ]\n+ 
index_templates: [ test1, test2 ]\n+ \n+ - is_false: blocks\n+ - is_false: nodes\n+ - is_true: metadata\n+ - is_false: routing_table\n+ - is_false: routing_nodes\n+ - is_false: allocations\n+ - is_true: metadata.templates.test1\n+ - is_true: metadata.templates.test2\n+ - is_false: metadata.templates.foo\n+\n+---\n+\"Filtering the cluster state by indices should work in routing table and metadata\":\n+ - do:\n+ index:\n+ index: another\n+ type: type\n+ id: testing_document\n+ body:\n+ \"text\" : \"The quick brown fox is brown.\"\n+ \n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ cluster.state:\n+ metric: [ routing_table, metadata ]\n+ index: [ testidx ]\n+ \n+ - is_false: metadata.indices.another\n+ - is_false: routing_table.indices.another\n+ - is_true: metadata.indices.testidx\n+ - is_true: routing_table.indices.testidx\n+\n+---\n+\"Filtering the cluster state using _all for indices and metrics should work\":\n+ - do:\n+ cluster.state:\n+ metric: [ '_all' ] \n+ index: [ '_all' ] \n+ \n+ - is_true: blocks\n+ - is_true: nodes\n+ - is_true: metadata\n+ - is_true: routing_table\n+ - is_true: routing_nodes\n+ - is_true: allocations\n+", "filename": "rest-api-spec/test/cluster.state/20_filtering.yaml", "status": "added" }, { "diff": "@@ -35,6 +35,7 @@\n import org.elasticsearch.cluster.routing.allocation.AllocationExplanation;\n import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;\n import org.elasticsearch.common.Nullable;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.collect.ImmutableOpenMap;\n import org.elasticsearch.common.compress.CompressedString;\n import org.elasticsearch.common.io.stream.BytesStreamInput;\n@@ -50,10 +51,7 @@\n import org.elasticsearch.index.shard.ShardId;\n \n import java.io.IOException;\n-import java.util.HashMap;\n-import java.util.List;\n-import java.util.Locale;\n-import java.util.Map;\n+import java.util.*;\n \n /**\n *\n@@ -234,12 +232,14 @@ public String toString() {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- if (!params.paramAsBoolean(\"filter_nodes\", false)) {\n+ Set<String> metrics = Strings.splitStringByCommaToSet(params.param(\"metric\", \"_all\"));\n+ boolean isAllMetricsOnly = metrics.size() == 1 && metrics.contains(\"_all\");\n+\n+ if (isAllMetricsOnly || metrics.contains(\"nodes\")) {\n builder.field(\"master_node\", nodes().masterNodeId());\n }\n \n- // blocks\n- if (!params.paramAsBoolean(\"filter_blocks\", false)) {\n+ if (isAllMetricsOnly || metrics.contains(\"blocks\")) {\n builder.startObject(\"blocks\");\n \n if (!blocks().global().isEmpty()) {\n@@ -266,7 +266,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n }\n \n // nodes\n- if (!params.paramAsBoolean(\"filter_nodes\", false)) {\n+ if (isAllMetricsOnly || metrics.contains(\"nodes\")) {\n builder.startObject(\"nodes\");\n for (DiscoveryNode node : nodes()) {\n builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);\n@@ -285,7 +285,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n }\n \n // meta data\n- if (!params.paramAsBoolean(\"filter_metadata\", false)) {\n+ if (isAllMetricsOnly || metrics.contains(\"metadata\")) {\n builder.startObject(\"metadata\");\n \n builder.startObject(\"templates\");\n@@ -371,7 +371,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n }\n \n // routing table\n- if (!params.paramAsBoolean(\"filter_routing_table\", 
false)) {\n+ if (isAllMetricsOnly || metrics.contains(\"routing_table\")) {\n builder.startObject(\"routing_table\");\n builder.startObject(\"indices\");\n for (IndexRoutingTable indexRoutingTable : routingTable()) {\n@@ -392,7 +392,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n }\n \n // routing nodes\n- if (!params.paramAsBoolean(\"filter_routing_table\", false)) {\n+ if (isAllMetricsOnly || metrics.contains(\"routing_table\")) {\n builder.startObject(\"routing_nodes\");\n builder.startArray(\"unassigned\");\n for (ShardRouting shardRouting : readOnlyRoutingNodes().unassigned()) {\n@@ -413,7 +413,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.endObject();\n }\n \n- if (!params.paramAsBoolean(\"filter_routing_table\", false)) {\n+ if (isAllMetricsOnly || metrics.contains(\"routing_table\")) {\n builder.startArray(\"allocations\");\n for (Map.Entry<ShardId, List<AllocationExplanation.NodeExplanation>> entry : allocationExplanation().explanations().entrySet()) {\n builder.startObject();\n@@ -435,7 +435,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.endArray();\n }\n \n- if (!params.paramAsBoolean(\"filter_customs\", false)) {\n+ if (isAllMetricsOnly || metrics.contains(\"customs\")) {\n for (ObjectObjectCursor<String, Custom> cursor : customs) {\n builder.startObject(cursor.key);\n lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);", "filename": "src/main/java/org/elasticsearch/cluster/ClusterState.java", "status": "modified" } ] }
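The core of the `ClusterState.toXContent()` change above is replacing the old `filter_*` boolean parameters with a membership check against the requested metrics. A simplified, self-contained sketch of that check, with `parseMetrics` as a stand-in for `Strings.splitStringByCommaToSet()`:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Sketch of the membership check that gates each section of the cluster state response.
public class MetricFilterSketch {

    static Set<String> parseMetrics(String metricParam) {
        return new HashSet<>(Arrays.asList(metricParam.split(",")));
    }

    static boolean shouldRender(Set<String> metrics, String section) {
        boolean allMetricsOnly = metrics.size() == 1 && metrics.contains("_all");
        return allMetricsOnly || metrics.contains(section);
    }

    public static void main(String[] args) {
        Set<String> metrics = parseMetrics("nodes"); // e.g. GET /_cluster/state/nodes
        System.out.println(shouldRender(metrics, "nodes"));    // true
        System.out.println(shouldRender(metrics, "metadata")); // false -> the section is skipped entirely
    }
}
```

Skipping the section entirely, rather than emitting an empty stub, is exactly what the original issue asked for.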
{ "body": "Only including the `matches` key can be cumbersome for anyone parsing the percolator response. Also it is inconsistent with the search api, where the `hits` key is always returned regardless if there are any hits.\n\nSo we should always include the `matches` key in the percolator response.\n", "comments": [], "number": 4881, "title": "Percolator response should always return the `matches` key" }
{ "body": "Relates to #4881\n", "number": 4882, "review_comments": [], "title": "Percolator response always returns the `matches` key." }
{ "commits": [ { "message": "Percolator response now always returns the `matches` key.\n\nCloses #4881" } ], "files": [ { "diff": "@@ -0,0 +1,31 @@\n+---\n+\"Basic percolation tests on an empty cluster\":\n+\n+ - do:\n+ indices.create:\n+ index: test_index\n+\n+ - do:\n+ indices.refresh: {}\n+\n+ - do:\n+ percolate:\n+ index: test_index\n+ type: test_type\n+ body:\n+ doc:\n+ foo: bar\n+\n+ - match: {'total': 0}\n+ - match: {'matches': []}\n+\n+ - do:\n+ count_percolate:\n+ index: test_index\n+ type: test_type\n+ body:\n+ doc:\n+ foo: bar\n+\n+ - is_false: matches\n+ - match: {'total': 0}", "filename": "rest-api-spec/test/percolate/17_empty.yaml", "status": "added" }, { "diff": "@@ -42,7 +42,7 @@\n */\n public class PercolateResponse extends BroadcastOperationResponse implements Iterable<PercolateResponse.Match>, ToXContent {\n \n- private static final Match[] EMPTY = new Match[0];\n+ public static final Match[] EMPTY = new Match[0];\n \n private long tookInMillis;\n private Match[] matches;\n@@ -60,10 +60,10 @@ public PercolateResponse(int totalShards, int successfulShards, int failedShards\n this.aggregations = aggregations;\n }\n \n- public PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures, long tookInMillis) {\n+ public PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures, long tookInMillis, Match[] matches) {\n super(totalShards, successfulShards, failedShards, shardFailures);\n this.tookInMillis = tookInMillis;\n- this.matches = EMPTY;\n+ this.matches = matches;\n }\n \n PercolateResponse() {\n@@ -116,7 +116,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n RestActions.buildBroadcastShardsHeader(builder, this);\n \n builder.field(Fields.TOTAL, count);\n- if (matches.length != 0) {\n+ if (matches != null) {\n builder.startArray(Fields.MATCHES);\n boolean justIds = \"ids\".equals(params.param(\"percolate_format\"));\n if (justIds) {", "filename": "src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java", "status": "modified" }, { "diff": "@@ -155,7 +155,8 @@ public static PercolateResponse reduce(PercolateRequest request, AtomicReference\n \n if (shardResults == null) {\n long tookInMillis = System.currentTimeMillis() - request.startTime;\n- return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis);\n+ PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY;\n+ return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches);\n } else {\n PercolatorService.ReduceResult result = percolatorService.reduce(percolatorTypeId, shardResults);\n long tookInMillis = System.currentTimeMillis() - request.startTime;", "filename": "src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java", "status": "modified" }, { "diff": "@@ -811,7 +811,7 @@ public final static class ReduceResult {\n \n public ReduceResult(long count, InternalFacets reducedFacets, InternalAggregations reducedAggregations) {\n this.count = count;\n- this.matches = EMPTY;\n+ this.matches = null;\n this.reducedFacets = reducedFacets;\n this.reducedAggregations = reducedAggregations;\n }", "filename": "src/main/java/org/elasticsearch/percolator/PercolatorService.java", "status": "modified" } ] }
{ "body": "I'm trying to create a date_histogram for recent events, where days where no events happen are still shown.\n\n```\n{\n \"aggs\": {\n \"events_last_week\": {\n \"filter\": {\n \"range\": {\n \"@timestamp\": {\n \"from\": \"2014-01-10\"\n }\n }\n },\n \"aggs\": {\n \"events_last_week_histogram\": {\n \"date_histogram\": {\n \"min_doc_count\": 0,\n \"field\": \"@timestamp\",\n \"format\": \"yyyy-MM-dd\",\n \"interval\": \"1d\"\n }\n }\n }\n }\n }\n}\n```\n\nI get a response like this\n\n```\n\"aggregations\": {\n \"events_last_week\": {\n \"doc_count\": 33861,\n \"events_last_week_histogram\": [\n {\n \"key_as_string\": \"2014-01-10\",\n \"key\": 1389744000000,\n \"doc_count\": 2120\n }, {\n \"key_as_string\": \"2014-01-16\",\n \"key\": 1389830400000,\n \"doc_count\": 3823\n }, {\n \"key_as_string\": \"2014-01-17\",\n \"key\": 1389916800000,\n \"doc_count\": 27918\n }\n ]\n }\n}\n```\n\nThe empty days are not returned. If I construct the query without the filter, the empty days are returned correctly.\n\nThere is also an issue even when the empty days are returned correctly without the filter. If, for example, today is \"2014-01-22\", and the latest timestamp in my data is \"2014-01-17\", then the 5 days between these two dates are not returned as empty buckets, though all the empty buckets prior to \"2014-01-17\" are returned correctly.\n", "comments": [ { "body": "@cmaitchison\n\nI can't really reproduce it, I ran the same queries as you and I get the right responses. What es version are you working with? we introduced `min_doc_count` on `1.0.0.RC1`\n\n> There is also an issue even when the empty days are returned correctly without the filter. If, for example, today is \"2014-01-22\", and the latest timestamp in my data is \"2014-01-17\", then the 5 days between these two dates are not returned as empty buckets, though all the empty buckets prior to \"2014-01-17\" are returned correctly.\n\nthe gaps that are filled are based on the dates in the documents you're aggregating... so the first histogram bucket will be based on the earliest date in the document set and the last bucket will be based on the latest date in the set... then we fill in all gaps between these two buckets.\n\nwe can consider adding a \"range\" settings to the histograms which will enable to define the value range (or date range in case of `date_histogram`) on which the buckets will be created. In your case, that'll mean that if you define a range of the form `\"range\": { \"to\" : \"now\" }` along with `\"min_doc_count\" : 0` we'll return all the empty buckets until `now` (beyond the dates in the document set)\n", "created_at": "2014-01-22T10:27:13Z" }, { "body": "@cmaitchison scratch that... I finally managed to reproduce it (it happens when you have a single shard)... will work on a fix\n", "created_at": "2014-01-22T10:31:54Z" }, { "body": "Wow, nice find! I would never have thought to have mentioned that.\n\n> On 22 Jan 2014, at 21:32, uboness notifications@github.com wrote:\n> \n> @cmaitchison scratch that... I finally managed to reproduce it (it happens when you have a single shard)... will work on a fix\n> \n> —\n> Reply to this email directly or view it on GitHub.\n", "created_at": "2014-01-22T10:53:55Z" }, { "body": "Also related to this title, I've found that `min_doc_count=0` does not work if _all_ of the buckets would be empty after applying the filter. 
I can reproduce this issue on an index with 2 shards.\n\n```\n{\n \"aggs\": {\n \"filtered_events\": {\n \"filter\": {\n \"and\": [\n {\n \"range\": {\n \"@timestamp\": {\n \"from\": 1390267500000,\n \"to\": 1390267560000\n }\n }\n }\n ]\n },\n \"aggs\": {\n \"filtered_events_histogram\": {\n \"date_histogram\": {\n \"min_doc_count\": 0,\n \"field\": \"@timestamp\",\n \"interval\": \"1s\"\n }\n }\n }\n }\n }\n}\n```\n\nThe above query should return 60 results, 1 for each second in the minute. If any events are found in that minute then 60 results are returned. If no events are found in that minute then 0 results are returned, when you would expect 60 empty buckets.\n\nMy use case is zooming in on a series on a chart. The zero value results are very helpful to know where to plot the zeros on the x-axis.\n", "created_at": "2014-01-23T05:17:41Z" }, { "body": "Another related issue I am finding is that sometimes the intervals do not go back far enough.\n\n```\n{\n \"aggs\": {\n \"events_last_week\": {\n \"filter\": {\n \"and\": [\n {\n \"range\": {\n \"@timestamp\": {\n \"from\": 1390267432894,\n \"to\": 1390267547037\n }\n }\n }\n ]\n },\n \"aggs\": {\n \"events_last_week_histogram\": {\n \"date_histogram\": {\n \"min_doc_count\": 0,\n \"field\": \"@timestamp\",\n \"interval\": \"second\"\n }\n }\n }\n }\n }\n}\n```\n\nreturns exactly\n\n```\n{\n \"aggregations\": {\n \"events_last_week\": {\n \"doc_count\": 1099,\n \"events_last_week_histogram\": [\n {\n \"key\": 1390267526000,\n \"doc_count\": 12\n },\n {\n \"key\": 1390267527000,\n \"doc_count\": 0\n },\n {\n \"key\": 1390267528000,\n \"doc_count\": 29\n },\n {\n \"key\": 1390267529000,\n \"doc_count\": 32\n },\n {\n \"key\": 1390267530000,\n \"doc_count\": 58\n },\n {\n \"key\": 1390267531000,\n \"doc_count\": 64\n },\n {\n \"key\": 1390267532000,\n \"doc_count\": 35\n },\n {\n \"key\": 1390267533000,\n \"doc_count\": 36\n },\n {\n \"key\": 1390267534000,\n \"doc_count\": 43\n },\n {\n \"key\": 1390267535000,\n \"doc_count\": 52\n },\n {\n \"key\": 1390267536000,\n \"doc_count\": 58\n },\n {\n \"key\": 1390267537000,\n \"doc_count\": 62\n },\n {\n \"key\": 1390267538000,\n \"doc_count\": 76\n },\n {\n \"key\": 1390267539000,\n \"doc_count\": 70\n },\n {\n \"key\": 1390267540000,\n \"doc_count\": 53\n },\n {\n \"key\": 1390267541000,\n \"doc_count\": 72\n },\n {\n \"key\": 1390267542000,\n \"doc_count\": 81\n },\n {\n \"key\": 1390267543000,\n \"doc_count\": 48\n },\n {\n \"key\": 1390267544000,\n \"doc_count\": 88\n },\n {\n \"key\": 1390267545000,\n \"doc_count\": 45\n },\n {\n \"key\": 1390267546000,\n \"doc_count\": 83\n },\n {\n \"key\": 1390267547000,\n \"doc_count\": 2\n }\n ]\n }\n }\n}\n```\n\nBut it is missing all of the empty buckets between 1390267432894 and 1390267526000. Again, this is with a 2 shard index on 1.0.0RC1.\n", "created_at": "2014-01-23T05:59:27Z" }, { "body": "@cmaitchison as I mentioned above, the histogram operates on the dataset and extracts the min/max of the histogram from the documents (the earliest/latest). There is no direct relations between the filter aggregation and the histogram aggregations (aggregations are unaware of other aggregations in their hierarchy). We could potentially add a `range` feature to histogram, but if we do it'll have to be post 1.0.\n\nIn the first example you gave, there are no documents in that minute, there are no buckets (as we can't determine the min/max values). 
For the second example, it might be that the first document in the doc set has a later timestamp than the `from` one in the filter.\n", "created_at": "2014-01-23T15:41:03Z" }, { "body": "Thanks, @uboness, for your help and excellent explanation. `range` on histogram is definitely a feature I would use. For now I can fill in the gaps on the client-side. Thanks again.\n", "created_at": "2014-01-23T21:13:08Z" }, { "body": "@cmaitchison no worries... thank you for the bug report! important one!\n", "created_at": "2014-01-23T21:18:54Z" }, { "body": "I'm interested in hard range boundaries (returning empty buckets to fill gaps between from and to in the case of missing documents) as well. Is there an issue tracking this, or shall I raise one?\n", "created_at": "2014-01-28T18:22:17Z" }, { "body": "For anyone who arrived to this thread via Google, hard ranges is supported via the extended_bounds param. http://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html\n", "created_at": "2015-04-28T02:13:54Z" }, { "body": "I'm now experiencing the same issue as reported running es 1.6.0 \n\n```\nhistogram = {\n invervals: {\n date_histogram: {\n field: 'called_at',\n interval: 'day',\n order: { _key: \"asc\" },\n min_doc_count: 0 # doesn't appear to have any impact on the final result.\n },\n aggs: stats\n }\n}\n```\n", "created_at": "2015-06-18T14:40:27Z" }, { "body": "it looks like when nesting a date_histogram within a term aggregation there is no way for the min_doc_count to auto fill the zero results.\n\n```\naggs: {\n groups: {\n terms: {\n min_doc_count: 0\n script: '...'\n },\n aggs: {\n invervals: {\n date_histogram: {\n field: 'called_at',\n interval: 'day',\n order: { _key: \"asc\" },\n min_doc_count: 0 # doesn't appear to have any impact on the final result.\n },\n aggs: stats\n }\n }\n}\n```\n", "created_at": "2015-06-18T15:47:14Z" }, { "body": "@taf2 please could you open an issue with a complete recreation which explains the problem?\n", "created_at": "2015-06-18T19:49:24Z" }, { "body": "Is this bug still there? I am trying to do the same exact thing as the OP right now.", "created_at": "2020-04-08T18:45:03Z" }, { "body": "me too! :)", "created_at": "2020-04-20T23:03:30Z" }, { "body": "And me either. :)", "created_at": "2020-08-12T12:34:30Z" }, { "body": "Hi, i found the same issue but it could be workaround adding the object extended_bounds to the date_histogram aggregation, something like this:\r\n\r\n{\"extended_bounds\":{\"min\":\"+timeInit+\",\"max\":\"+timeFin+\"}} where timeInit and timeFin are the same period specified in the range filter in miliseconds\r\n\r\nI hope this can help somebody.", "created_at": "2020-11-30T12:37:25Z" } ], "number": 4843, "title": "min_doc_count=0 doesn't work with a date_histogram with a filter" }
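The explanation in the comments above (the histogram can only fill gaps between the earliest and latest keys it actually observed in the documents) can be illustrated with a toy gap-filling routine. Everything below, including the `fillGaps` name, plain long keys, and a fixed numeric interval, is simplified for illustration; the real reduce path uses `Rounding.nextRoundingValue()` as shown in the diffs further down.

```java
import java.util.Map;
import java.util.TreeMap;

// Toy illustration of why the empty buckets stop at the observed data: gaps are filled only
// between the smallest and largest keys that were actually seen, stepping by the interval.
public class GapFillSketch {

    static TreeMap<Long, Long> fillGaps(Map<Long, Long> counts, long interval) {
        TreeMap<Long, Long> filled = new TreeMap<>(counts);
        if (filled.isEmpty()) {
            return filled; // no observed keys, so there is nothing to anchor the bucket range on
        }
        for (long key = filled.firstKey(); key <= filled.lastKey(); key += interval) {
            filled.putIfAbsent(key, 0L);
        }
        return filled;
    }

    public static void main(String[] args) {
        TreeMap<Long, Long> counts = new TreeMap<>();
        counts.put(0L, 2L);
        counts.put(3L, 5L);
        // Buckets 1 and 2 get zero counts, but nothing is created before 0 or after 3.
        System.out.println(fillGaps(counts, 1L)); // {0=2, 1=0, 2=0, 3=5}
    }
}
```

This is also why a filter that matches no documents at all produces no buckets: with an empty key set there is no range to fill.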
{ "body": "...ard, the reduce call was not propagated properly down the agg hierarchy.\n\nClosed: #4843\n", "number": 4869, "review_comments": [ { "body": "should we tests on several shards as well?\n", "created_at": "2014-01-23T16:53:43Z" } ], "title": "Fixed an issue where there are sub aggregations executing on a single shard" }
{ "commits": [ { "message": "Fixed an issue where there are sug aggregations executing on a single shard, the reduce call was not propagated properly down the agg hierarchy.\n\nClosed: #4843" } ], "files": [ { "diff": "@@ -115,15 +115,15 @@ public <A extends Aggregation> A get(String name) {\n /**\n * Reduces the given lists of addAggregation.\n *\n- * @param aggregationsList A list of addAggregation to reduce\n+ * @param aggregationsList A list of aggregation to reduce\n * @return The reduced addAggregation\n */\n public static InternalAggregations reduce(List<InternalAggregations> aggregationsList, CacheRecycler cacheRecycler) {\n if (aggregationsList.isEmpty()) {\n return null;\n }\n \n- // first we collect all addAggregation of the same type and list them together\n+ // first we collect all aggregations of the same type and list them together\n \n Map<String, List<InternalAggregation>> aggByName = new HashMap<String, List<InternalAggregation>>();\n for (InternalAggregations aggregations : aggregationsList) {\n@@ -150,6 +150,17 @@ public static InternalAggregations reduce(List<InternalAggregations> aggregation\n return result;\n }\n \n+ /**\n+ * Reduces this aggregations, effectively propagates the reduce to all the sub aggregations\n+ * @param cacheRecycler\n+ */\n+ public void reduce(CacheRecycler cacheRecycler) {\n+ for (int i = 0; i < aggregations.size(); i++) {\n+ InternalAggregation aggregation = aggregations.get(i);\n+ aggregations.set(i, aggregation.reduce(new InternalAggregation.ReduceContext(ImmutableList.of(aggregation), cacheRecycler)));\n+ }\n+ }\n+\n /** The fields required to write this addAggregation to xcontent */\n static class Fields {\n public static final XContentBuilderString AGGREGATIONS = new XContentBuilderString(\"aggregations\");", "filename": "src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java", "status": "modified" }, { "diff": "@@ -64,7 +64,9 @@ public InternalAggregations getAggregations() {\n public InternalAggregation reduce(ReduceContext reduceContext) {\n List<InternalAggregation> aggregations = reduceContext.aggregations();\n if (aggregations.size() == 1) {\n- return aggregations.get(0);\n+ B reduced = ((B) aggregations.get(0));\n+ reduced.aggregations.reduce(reduceContext.cacheRecycler());\n+ return reduced;\n }\n B reduced = null;\n List<InternalAggregations> subAggregationsList = new ArrayList<InternalAggregations>(aggregations.size());", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregation.java", "status": "modified" }, { "diff": "@@ -26,15 +26,14 @@\n \n /**\n * Creates an aggregation based on bucketing points into GeoHashes\n- *\n */\n public class GeoHashGridBuilder extends AggregationBuilder<GeoHashGridBuilder> {\n \n \n private String field;\n- private int precision=GeoHashGridParser.DEFAULT_PRECISION;\n- private int requiredSize=GeoHashGridParser.DEFAULT_MAX_NUM_CELLS;\n- private int shardSize=0;\n+ private int precision = GeoHashGridParser.DEFAULT_PRECISION;\n+ private int requiredSize = GeoHashGridParser.DEFAULT_MAX_NUM_CELLS;\n+ private int shardSize = 0;\n \n public GeoHashGridBuilder(String name) {\n super(name, InternalGeoHashGrid.TYPE.name());\n@@ -46,18 +45,19 @@ public GeoHashGridBuilder field(String field) {\n }\n \n public GeoHashGridBuilder precision(int precision) {\n- if((precision<1)||(precision>12))\n- {\n- throw new ElasticsearchIllegalArgumentException(\"Invalid geohash aggregation precision of \"+precision\n- +\"must be between 1 and 12\");\n+ if 
((precision < 1) || (precision > 12)) {\n+ throw new ElasticsearchIllegalArgumentException(\"Invalid geohash aggregation precision of \" + precision\n+ + \"must be between 1 and 12\");\n }\n this.precision = precision;\n return this;\n }\n+\n public GeoHashGridBuilder size(int requiredSize) {\n this.requiredSize = requiredSize;\n return this;\n }\n+\n public GeoHashGridBuilder shardSize(int shardSize) {\n this.shardSize = shardSize;\n return this;", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java", "status": "modified" }, { "diff": "@@ -102,7 +102,10 @@ public int compareTo(Bucket other) {\n }\n public Bucket reduce(List<? extends Bucket> buckets, CacheRecycler cacheRecycler) {\n if (buckets.size() == 1) {\n- return buckets.get(0);\n+ // we still need to reduce the sub aggs\n+ Bucket bucket = buckets.get(0);\n+ bucket.aggregations.reduce(cacheRecycler);\n+ return bucket;\n }\n Bucket reduced = null;\n List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(buckets.size());\n@@ -166,7 +169,7 @@ public InternalGeoHashGrid reduce(ReduceContext reduceContext) {\n List<InternalAggregation> aggregations = reduceContext.aggregations();\n if (aggregations.size() == 1) {\n InternalGeoHashGrid grid = (InternalGeoHashGrid) aggregations.get(0);\n- grid.trimExcessEntries();\n+ grid.trimExcessEntries(reduceContext.cacheRecycler());\n return grid;\n }\n InternalGeoHashGrid reduced = null;\n@@ -227,21 +230,14 @@ public int getNumberOfBuckets() {\n }\n \n \n- protected void trimExcessEntries() {\n- if (requiredSize >= buckets.size()) {\n- return;\n- }\n-\n- if (buckets instanceof List) {\n- buckets = ((List) buckets).subList(0, requiredSize);\n- return;\n- }\n-\n+ protected void trimExcessEntries(CacheRecycler cacheRecycler) {\n int i = 0;\n for (Iterator<Bucket> iter = buckets.iterator(); iter.hasNext();) {\n- iter.next();\n+ Bucket bucket = iter.next();\n if (i++ >= requiredSize) {\n iter.remove();\n+ } else {\n+ bucket.aggregations.reduce(cacheRecycler);\n }\n }\n }", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java", "status": "modified" }, { "diff": "@@ -77,7 +77,10 @@ public Aggregations getAggregations() {\n \n Bucket reduce(List<Bucket> buckets, CacheRecycler cacheRecycler) {\n if (buckets.size() == 1) {\n- return buckets.get(0);\n+ // we only need to reduce the sub aggregations\n+ Bucket bucket = buckets.get(0);\n+ bucket.aggregations.reduce(cacheRecycler);\n+ return bucket;\n }\n List<InternalAggregations> aggregations = new ArrayList<InternalAggregations>(buckets.size());\n Bucket reduced = null;\n@@ -172,21 +175,27 @@ public InternalAggregation reduce(ReduceContext reduceContext) {\n List<InternalAggregation> aggregations = reduceContext.aggregations();\n if (aggregations.size() == 1) {\n \n+ AbstractHistogramBase<B> histo = (AbstractHistogramBase<B>) aggregations.get(0);\n+\n if (minDocCount == 1) {\n- return aggregations.get(0);\n+ for (B bucket : histo.buckets) {\n+ ((Bucket) bucket).aggregations.reduce(reduceContext.cacheRecycler());\n+ }\n+ return histo;\n }\n \n- AbstractHistogramBase histo = (AbstractHistogramBase) aggregations.get(0);\n+\n CollectionUtil.introSort(histo.buckets, order.asc ? InternalOrder.KEY_ASC.comparator() : InternalOrder.KEY_DESC.comparator());\n- List<HistogramBase.Bucket> list = order.asc ? histo.buckets : Lists.reverse(histo.buckets);\n+ List<B> list = order.asc ? 
histo.buckets : Lists.reverse(histo.buckets);\n HistogramBase.Bucket prevBucket = null;\n- ListIterator<HistogramBase.Bucket> iter = list.listIterator();\n+ ListIterator<B> iter = list.listIterator();\n if (minDocCount == 0) {\n // we need to fill the gaps with empty buckets\n while (iter.hasNext()) {\n // look ahead on the next bucket without advancing the iter\n // so we'll be able to insert elements at the right position\n HistogramBase.Bucket nextBucket = list.get(iter.nextIndex());\n+ ((Bucket) nextBucket).aggregations.reduce(reduceContext.cacheRecycler());\n if (prevBucket != null) {\n long key = emptyBucketInfo.rounding.nextRoundingValue(prevBucket.getKey());\n while (key != nextBucket.getKey()) {\n@@ -198,8 +207,11 @@ public InternalAggregation reduce(ReduceContext reduceContext) {\n }\n } else {\n while (iter.hasNext()) {\n- if (iter.next().getDocCount() < minDocCount) {\n+ Bucket bucket = (Bucket) iter.next();\n+ if (bucket.getDocCount() < minDocCount) {\n iter.remove();\n+ } else {\n+ bucket.aggregations.reduce(reduceContext.cacheRecycler());\n }\n }\n }", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AbstractHistogramBase.java", "status": "modified" }, { "diff": "@@ -86,7 +86,10 @@ public Aggregations getAggregations() {\n \n Bucket reduce(List<Bucket> ranges, CacheRecycler cacheRecycler) {\n if (ranges.size() == 1) {\n- return ranges.get(0);\n+ // we stil need to call reduce on all the sub aggregations\n+ Bucket bucket = ranges.get(0);\n+ bucket.aggregations.reduce(cacheRecycler);\n+ return bucket;\n }\n Bucket reduced = null;\n List<InternalAggregations> aggregationsList = Lists.newArrayListWithCapacity(ranges.size());\n@@ -196,7 +199,11 @@ public List<B> buckets() {\n public AbstractRangeBase reduce(ReduceContext reduceContext) {\n List<InternalAggregation> aggregations = reduceContext.aggregations();\n if (aggregations.size() == 1) {\n- return (AbstractRangeBase) aggregations.get(0);\n+ AbstractRangeBase<B> reduced = (AbstractRangeBase<B>) aggregations.get(0);\n+ for (B bucket : reduced.buckets()) {\n+ ((Bucket) bucket).aggregations.reduce(reduceContext.cacheRecycler());\n+ }\n+ return reduced;\n }\n List<List<Bucket>> rangesList = null;\n for (InternalAggregation aggregation : aggregations) {", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBase.java", "status": "modified" }, { "diff": "@@ -107,7 +107,7 @@ public InternalTerms reduce(ReduceContext reduceContext) {\n List<InternalAggregation> aggregations = reduceContext.aggregations();\n if (aggregations.size() == 1) {\n InternalTerms terms = (InternalTerms) aggregations.get(0);\n- terms.trimExcessEntries();\n+ terms.trimExcessEntries(reduceContext.cacheRecycler());\n return terms;\n }\n InternalTerms reduced = null;", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java", "status": "modified" }, { "diff": "@@ -63,7 +63,9 @@ public Aggregations getAggregations() {\n \n public Bucket reduce(List<? 
extends Bucket> buckets, CacheRecycler cacheRecycler) {\n if (buckets.size() == 1) {\n- return buckets.get(0);\n+ Bucket bucket = buckets.get(0);\n+ bucket.aggregations.reduce(cacheRecycler);\n+ return bucket;\n }\n Bucket reduced = null;\n List<InternalAggregations> aggregationsList = new ArrayList<InternalAggregations>(buckets.size());\n@@ -124,12 +126,11 @@ public InternalTerms reduce(ReduceContext reduceContext) {\n List<InternalAggregation> aggregations = reduceContext.aggregations();\n if (aggregations.size() == 1) {\n InternalTerms terms = (InternalTerms) aggregations.get(0);\n- terms.trimExcessEntries();\n+ terms.trimExcessEntries(reduceContext.cacheRecycler());\n return terms;\n }\n- InternalTerms reduced = null;\n \n- // TODO: would it be better to use a hppc map and then directly work on the backing array instead of using a PQ?\n+ InternalTerms reduced = null;\n \n Map<Text, List<InternalTerms.Bucket>> buckets = null;\n for (InternalAggregation aggregation : aggregations) {\n@@ -175,14 +176,15 @@ public InternalTerms reduce(ReduceContext reduceContext) {\n return reduced;\n }\n \n- final void trimExcessEntries() {\n+ final void trimExcessEntries(CacheRecycler cacheRecycler) {\n final List<Bucket> newBuckets = Lists.newArrayList();\n for (Bucket b : buckets) {\n if (newBuckets.size() >= requiredSize) {\n break;\n }\n if (b.docCount >= minDocCount) {\n newBuckets.add(b);\n+ b.aggregations.reduce(cacheRecycler);\n }\n }\n buckets = newBuckets;", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java", "status": "modified" }, { "diff": "@@ -104,7 +104,7 @@ public InternalTerms reduce(ReduceContext reduceContext) {\n List<InternalAggregation> aggregations = reduceContext.aggregations();\n if (aggregations.size() == 1) {\n InternalTerms terms = (InternalTerms) aggregations.get(0);\n- terms.trimExcessEntries();\n+ terms.trimExcessEntries(reduceContext.cacheRecycler());\n return terms;\n }\n InternalTerms reduced = null;", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java", "status": "modified" }, { "diff": "@@ -64,14 +64,14 @@ private IndexRequestBuilder indexCity(String name, String latLon) throws Excepti\n source = source.endObject();\n return client().prepareIndex(\"idx\", \"type\").setSource(source);\n }\n- \n-\n- ObjectIntMap<String>expectedDocCountsForGeoHash=null;\n- int highestPrecisionGeohash=12;\n- int numRandomPoints=100;\n- \n- String smallestGeoHash=null;\n- \n+\n+\n+ ObjectIntMap<String> expectedDocCountsForGeoHash = null;\n+ int highestPrecisionGeohash = 12;\n+ int numRandomPoints = 100;\n+\n+ String smallestGeoHash = null;\n+\n @Before\n public void init() throws Exception {\n prepareCreate(\"idx\")\n@@ -82,24 +82,24 @@ public void init() throws Exception {\n \n List<IndexRequestBuilder> cities = new ArrayList<IndexRequestBuilder>();\n Random random = getRandom();\n- expectedDocCountsForGeoHash=new ObjectIntOpenHashMap<String>(numRandomPoints*2);\n+ expectedDocCountsForGeoHash = new ObjectIntOpenHashMap<String>(numRandomPoints * 2);\n for (int i = 0; i < numRandomPoints; i++) {\n //generate random point\n- double lat=(180d*random.nextDouble())-90d;\n- double lng=(360d*random.nextDouble())-180d;\n- String randomGeoHash=GeoHashUtils.encode(lat, lng,highestPrecisionGeohash);\n+ double lat = (180d * random.nextDouble()) - 90d;\n+ double lng = (360d * random.nextDouble()) - 180d;\n+ String randomGeoHash = GeoHashUtils.encode(lat, lng, highestPrecisionGeohash);\n //Index at the highest 
resolution\n- cities.add(indexCity(randomGeoHash, lat+\", \"+lng));\n- expectedDocCountsForGeoHash.put(randomGeoHash, expectedDocCountsForGeoHash.getOrDefault(randomGeoHash, 0)+1); \n+ cities.add(indexCity(randomGeoHash, lat + \", \" + lng));\n+ expectedDocCountsForGeoHash.put(randomGeoHash, expectedDocCountsForGeoHash.getOrDefault(randomGeoHash, 0) + 1);\n //Update expected doc counts for all resolutions..\n- for (int precision = highestPrecisionGeohash-1; precision >0; precision--) {\n- String hash=GeoHashUtils.encode(lat, lng,precision);\n- if((smallestGeoHash==null)||(hash.length()<smallestGeoHash.length())) {\n- smallestGeoHash=hash;\n+ for (int precision = highestPrecisionGeohash - 1; precision > 0; precision--) {\n+ String hash = GeoHashUtils.encode(lat, lng, precision);\n+ if ((smallestGeoHash == null) || (hash.length() < smallestGeoHash.length())) {\n+ smallestGeoHash = hash;\n }\n- expectedDocCountsForGeoHash.put(hash, expectedDocCountsForGeoHash.getOrDefault(hash, 0)+1); \n+ expectedDocCountsForGeoHash.put(hash, expectedDocCountsForGeoHash.getOrDefault(hash, 0) + 1);\n }\n- } \n+ }\n indexRandom(true, cities);\n }\n \n@@ -111,23 +111,24 @@ public void simple() throws Exception {\n .addAggregation(geohashGrid(\"geohashgrid\")\n .field(\"location\")\n .precision(precision)\n- )\n- .execute().actionGet();\n+ )\n+ .execute().actionGet();\n \n assertThat(response.getFailedShards(), equalTo(0));\n \n GeoHashGrid geoGrid = response.getAggregations().get(\"geohashgrid\");\n- for (GeoHashGrid.Bucket cell : geoGrid ){\n- String geohash=cell.getGeoHash();\n+ for (GeoHashGrid.Bucket cell : geoGrid) {\n+ String geohash = cell.getGeoHash();\n \n- long bucketCount=cell.getDocCount();\n- int expectedBucketCount=expectedDocCountsForGeoHash.get(geohash);\n+ long bucketCount = cell.getDocCount();\n+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);\n assertNotSame(bucketCount, 0);\n- assertEquals(\"Geohash \"+geohash+\" has wrong doc count \",\n- expectedBucketCount,bucketCount);\n+ assertEquals(\"Geohash \" + geohash + \" has wrong doc count \",\n+ expectedBucketCount, bucketCount);\n }\n }\n }\n+\n @Test\n public void filtered() throws Exception {\n GeoBoundingBoxFilterBuilder bbox = new GeoBoundingBoxFilterBuilder(\"location\");\n@@ -136,79 +137,79 @@ public void filtered() throws Exception {\n SearchResponse response = client().prepareSearch(\"idx\")\n .addAggregation(\n AggregationBuilders.filter(\"filtered\").filter(bbox)\n- .subAggregation(\n- geohashGrid(\"geohashgrid\")\n- .field(\"location\")\n- .precision(precision)\n- )\n- )\n- .execute().actionGet();\n+ .subAggregation(\n+ geohashGrid(\"geohashgrid\")\n+ .field(\"location\")\n+ .precision(precision)\n+ )\n+ )\n+ .execute().actionGet();\n \n assertThat(response.getFailedShards(), equalTo(0));\n- \n- \n- Filter filter =response.getAggregations().get(\"filtered\");\n \n- GeoHashGrid geoGrid = filter.getAggregations().get(\"geohashgrid\");\n- for (GeoHashGrid.Bucket cell : geoGrid ){\n+\n+ Filter filter = response.getAggregations().get(\"filtered\");\n+\n+ GeoHashGrid geoGrid = filter.getAggregations().get(\"geohashgrid\");\n+ for (GeoHashGrid.Bucket cell : geoGrid) {\n String geohash = cell.getGeoHash();\n- long bucketCount=cell.getDocCount();\n- int expectedBucketCount=expectedDocCountsForGeoHash.get(geohash);\n+ long bucketCount = cell.getDocCount();\n+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);\n assertNotSame(bucketCount, 0);\n assertTrue(\"Buckets must be filtered\", 
geohash.startsWith(smallestGeoHash));\n- assertEquals(\"Geohash \"+geohash+\" has wrong doc count \",\n- expectedBucketCount,bucketCount);\n- \n+ assertEquals(\"Geohash \" + geohash + \" has wrong doc count \",\n+ expectedBucketCount, bucketCount);\n+\n }\n }\n }\n- \n+\n @Test\n public void unmapped() throws Exception {\n client().admin().cluster().prepareHealth(\"idx_unmapped\").setWaitForYellowStatus().execute().actionGet();\n- \n- \n+\n+\n for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {\n SearchResponse response = client().prepareSearch(\"idx_unmapped\")\n .addAggregation(geohashGrid(\"geohashgrid\")\n .field(\"location\")\n .precision(precision)\n- )\n- .execute().actionGet();\n+ )\n+ .execute().actionGet();\n \n assertThat(response.getFailedShards(), equalTo(0));\n \n GeoHashGrid geoGrid = response.getAggregations().get(\"geohashgrid\");\n assertThat(geoGrid.getNumberOfBuckets(), equalTo(0));\n- } \n+ }\n \n }\n- \n+\n @Test\n public void partiallyUnmapped() throws Exception {\n for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {\n- SearchResponse response = client().prepareSearch(\"idx\",\"idx_unmapped\")\n+ SearchResponse response = client().prepareSearch(\"idx\", \"idx_unmapped\")\n .addAggregation(geohashGrid(\"geohashgrid\")\n .field(\"location\")\n .precision(precision)\n- )\n- .execute().actionGet();\n+ )\n+ .execute().actionGet();\n \n assertThat(response.getFailedShards(), equalTo(0));\n \n GeoHashGrid geoGrid = response.getAggregations().get(\"geohashgrid\");\n- for (GeoHashGrid.Bucket cell : geoGrid ){\n- String geohash=cell.getGeoHash();\n+ for (GeoHashGrid.Bucket cell : geoGrid) {\n+ String geohash = cell.getGeoHash();\n \n- long bucketCount=cell.getDocCount();\n- int expectedBucketCount=expectedDocCountsForGeoHash.get(geohash);\n+ long bucketCount = cell.getDocCount();\n+ int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);\n assertNotSame(bucketCount, 0);\n- assertEquals(\"Geohash \"+geohash+\" has wrong doc count \",\n- expectedBucketCount,bucketCount);\n+ assertEquals(\"Geohash \" + geohash + \" has wrong doc count \",\n+ expectedBucketCount, bucketCount);\n }\n }\n- } \n- \n+ }\n+\n @Test\n public void testTopMatch() throws Exception {\n for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {\n@@ -218,29 +219,28 @@ public void testTopMatch() throws Exception {\n .size(1)\n .shardSize(100)\n .precision(precision)\n- )\n- .execute().actionGet();\n+ )\n+ .execute().actionGet();\n \n assertThat(response.getFailedShards(), equalTo(0));\n \n GeoHashGrid geoGrid = response.getAggregations().get(\"geohashgrid\");\n //Check we only have one bucket with the best match for that resolution\n assertThat(geoGrid.getNumberOfBuckets(), equalTo(1));\n- for (GeoHashGrid.Bucket cell : geoGrid ){\n- String geohash=cell.getGeoHash();\n- long bucketCount=cell.getDocCount();\n- int expectedBucketCount=0;\n+ for (GeoHashGrid.Bucket cell : geoGrid) {\n+ String geohash = cell.getGeoHash();\n+ long bucketCount = cell.getDocCount();\n+ int expectedBucketCount = 0;\n for (ObjectIntCursor<String> cursor : expectedDocCountsForGeoHash) {\n- if(cursor.key.length()==precision)\n- {\n- expectedBucketCount=Math.max(expectedBucketCount, cursor.value);\n+ if (cursor.key.length() == precision) {\n+ expectedBucketCount = Math.max(expectedBucketCount, cursor.value);\n }\n }\n assertNotSame(bucketCount, 0);\n- assertEquals(\"Geohash \"+geohash+\" has wrong doc count \",\n- expectedBucketCount,bucketCount);\n+ 
assertEquals(\"Geohash \" + geohash + \" has wrong doc count \",\n+ expectedBucketCount, bucketCount);\n }\n }\n- } \n+ }\n \n }", "filename": "src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,326 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.search.aggregations.bucket;\n+\n+import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.action.search.SearchResponse;\n+import org.elasticsearch.common.geo.GeoHashUtils;\n+import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.query.FilterBuilders;\n+import org.elasticsearch.index.query.QueryBuilders;\n+import org.elasticsearch.search.aggregations.bucket.filter.Filter;\n+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;\n+import org.elasticsearch.search.aggregations.bucket.global.Global;\n+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogram;\n+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;\n+import org.elasticsearch.search.aggregations.bucket.missing.Missing;\n+import org.elasticsearch.search.aggregations.bucket.nested.Nested;\n+import org.elasticsearch.search.aggregations.bucket.range.Range;\n+import org.elasticsearch.search.aggregations.bucket.range.date.DateRange;\n+import org.elasticsearch.search.aggregations.bucket.range.ipv4.IPv4Range;\n+import org.elasticsearch.search.aggregations.bucket.terms.Terms;\n+import org.elasticsearch.test.ElasticsearchIntegrationTest;\n+import org.junit.Before;\n+import org.junit.Test;\n+\n+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;\n+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;\n+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;\n+import static org.hamcrest.Matchers.equalTo;\n+\n+/**\n+ * Tests making sure that the reduce is propagated to all aggregations in the hierarchy when executing on a single shard\n+ * These tests are based on the date histogram in combination of min_doc_count=0. In order for the date histogram to\n+ * compute empty buckets, its {@code reduce()} method must be called. 
So by adding the date histogram under other buckets,\n+ * we can make sure that the reduce is properly propagated by checking that empty buckets were created.\n+ */\n+@ClusterScope(scope = Scope.TEST)\n+public class SingleShardReduceTests extends ElasticsearchIntegrationTest {\n+\n+ @Override\n+ protected Settings nodeSettings(int nodeOrdinal) {\n+ return ImmutableSettings.builder()\n+ .put(\"index.number_of_shards\", 1)\n+ .put(\"index.number_of_replicas\", randomIntBetween(0, 1))\n+ .build();\n+ }\n+\n+ private IndexRequestBuilder indexDoc(String date, int value) throws Exception {\n+ return client().prepareIndex(\"idx\", \"type\").setSource(jsonBuilder()\n+ .startObject()\n+ .field(\"value\", value)\n+ .field(\"ip\", \"10.0.0.\" + value)\n+ .field(\"location\", GeoHashUtils.encode(52, 5, 12))\n+ .field(\"date\", date)\n+ .field(\"term-l\", 1)\n+ .field(\"term-d\", 1.5)\n+ .field(\"term-s\", \"term\")\n+ .startObject(\"nested\")\n+ .field(\"date\", date)\n+ .endObject()\n+ .endObject());\n+ }\n+\n+ @Before\n+ public void init() throws Exception {\n+ prepareCreate(\"idx\")\n+ .addMapping(\"type\", \"nested\", \"type=nested\", \"ip\", \"type=ip\", \"location\", \"type=geo_point\")\n+ .setSettings(indexSettings())\n+ .execute().actionGet();\n+\n+ indexRandom(true,\n+ indexDoc(\"2014-01-01\", 1),\n+ indexDoc(\"2014-01-02\", 2),\n+ indexDoc(\"2014-01-04\", 3));\n+ ensureSearchable();\n+ }\n+\n+ @Test\n+ public void testGlobal() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(global(\"global\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Global global = response.getAggregations().get(\"global\");\n+ DateHistogram histo = global.getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testFilter() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(filter(\"filter\").filter(FilterBuilders.matchAllFilter())\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Filter filter = response.getAggregations().get(\"filter\");\n+ DateHistogram histo = filter.getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testMissing() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(missing(\"missing\").field(\"foobar\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Missing missing = response.getAggregations().get(\"missing\");\n+ DateHistogram histo = missing.getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testGlobalWithFilterWithMissing() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(global(\"global\")\n+ .subAggregation(filter(\"filter\").filter(FilterBuilders.matchAllFilter())\n+ .subAggregation(missing(\"missing\").field(\"foobar\")\n+ 
.subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Global global = response.getAggregations().get(\"global\");\n+ Filter filter = global.getAggregations().get(\"filter\");\n+ Missing missing = filter.getAggregations().get(\"missing\");\n+ DateHistogram histo = missing.getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testNested() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(nested(\"nested\").path(\"nested\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"nested.date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Nested nested = response.getAggregations().get(\"nested\");\n+ DateHistogram histo = nested.getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testStringTerms() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(terms(\"terms\").field(\"term-s\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Terms terms = response.getAggregations().get(\"terms\");\n+ DateHistogram histo = terms.getByTerm(\"term\").getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testLongTerms() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(terms(\"terms\").field(\"term-l\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Terms terms = response.getAggregations().get(\"terms\");\n+ DateHistogram histo = terms.getByTerm(\"1\").getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testDoubleTerms() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(terms(\"terms\").field(\"term-d\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Terms terms = response.getAggregations().get(\"terms\");\n+ DateHistogram histo = terms.getByTerm(\"1.5\").getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testRange() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(range(\"range\").field(\"value\").addRange(\"r1\", 0, 10)\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Range range = response.getAggregations().get(\"range\");\n+ DateHistogram histo = range.getByKey(\"r1\").getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public 
void testDateRange() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(dateRange(\"range\").field(\"date\").addRange(\"r1\", \"2014-01-01\", \"2014-01-10\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ DateRange range = response.getAggregations().get(\"range\");\n+ DateHistogram histo = range.getByKey(\"r1\").getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testIpRange() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(ipRange(\"range\").field(\"ip\").addRange(\"r1\", \"10.0.0.1\", \"10.0.0.10\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ IPv4Range range = response.getAggregations().get(\"range\");\n+ DateHistogram histo = range.getByKey(\"r1\").getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testHistogram() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(histogram(\"topHisto\").field(\"value\").interval(5)\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ Histogram topHisto = response.getAggregations().get(\"topHisto\");\n+ DateHistogram histo = topHisto.getByKey(0).getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testDateHistogram() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(dateHistogram(\"topHisto\").field(\"date\").interval(DateHistogram.Interval.MONTH)\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ DateHistogram topHisto = response.getAggregations().get(\"topHisto\");\n+ DateHistogram histo = topHisto.iterator().next().getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+ @Test\n+ public void testGeoHashGrid() throws Exception {\n+\n+ SearchResponse response = client().prepareSearch(\"idx\")\n+ .setQuery(QueryBuilders.matchAllQuery())\n+ .addAggregation(geohashGrid(\"grid\").field(\"location\")\n+ .subAggregation(dateHistogram(\"histo\").field(\"date\").interval(DateHistogram.Interval.DAY).minDocCount(0)))\n+ .execute().actionGet();\n+\n+ assertSearchResponse(response);\n+\n+ GeoHashGrid grid = response.getAggregations().get(\"grid\");\n+ DateHistogram histo = grid.iterator().next().getAggregations().get(\"histo\");\n+ assertThat(histo.buckets().size(), equalTo(4));\n+ }\n+\n+\n+}", "filename": "src/test/java/org/elasticsearch/search/aggregations/bucket/SingleShardReduceTests.java", "status": "added" } ] }
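Note: the SingleShardReduceTests added above exercise the reduce propagation through the Java test client. As a rough REST-level illustration of the same scenario, the sketch below sends an equivalent request; the index name, field names, and expected bucket count are taken from the test setup and assertions above, and the exact response shape is not guaranteed here.

```sh
# Illustrative sketch only: query the single-shard "idx" index created by the test's init() method.
curl -XPOST 'localhost:9200/idx/_search?pretty' -d '{
  "query": { "match_all": {} },
  "aggs": {
    "terms": {
      "terms": { "field": "term-s" },
      "aggs": {
        "histo": { "date_histogram": { "field": "date", "interval": "day", "min_doc_count": 0 } }
      }
    }
  }
}'
# With documents on 2014-01-01, 2014-01-02 and 2014-01-04, a properly propagated reduce yields
# four daily buckets (the empty 2014-01-03 bucket included); a missing reduce would report only three.
```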
{ "body": "There are some dups and need more aliases...\n", "comments": [], "number": 4852, "title": "cat headers are inconsistent, incomplete" }
{ "body": "Remove duplicate cache columns, add aliases, normalize header names across actions.\n\nFor example:\n\n```\n==== nodes ====\nid | id,nodeId | unique node id\npid | p | process id\nhost | h | host name\nip | i | ip address\nport | po | bound transport port\nversion | v | es version\nbuild | b | es build hash\njdk | j | jdk version\ndisk.avail | d,disk,diskAvail | available disk space\nheap.percent | hp,heapPercent | used heap ratio\nheap.max | hm,heapMax | max configured heap\nram.percent | rp,ramPercent | used machine memory ratio\nram.max | rm,ramMax | total machine memory\nload | l | most recent load avg\nuptime | u | node uptime\nnode.type | type,dc,nodeType | d:data node, c:client node\nmaster | m | m:master-eligible, *:current master\nname | n | node name\ncompletion.size | cs,completionSize | size of completion\nfielddata.memory_size | fm,fielddataMemory | used fielddata cache\nfielddata.evictions | fe,fielddataEvictions | fielddata evictions\nfilter_cache.memory_size | fcm,filterCacheMemory | used filter cache\n```\n\nCloses #4852\n", "number": 4853, "review_comments": [], "title": "Clean up cat headers" }
{ "commits": [ { "message": "Remove duplicate cache columns" }, { "message": "Normalize cat headers, add aliases\n\nCloses #4852." } ], "files": [ { "diff": "@@ -91,11 +91,11 @@ void documentation(StringBuilder sb) {\n Table getTableWithHeader(RestRequest request) {\n final Table table = new Table();\n table.startHeaders();\n- table.addCell(\"alias\", \"desc:alias name\");\n- table.addCell(\"index\", \"desc:index alias points to\");\n- table.addCell(\"filter\", \"desc:filter\");\n- table.addCell(\"indexRouting\", \"desc:index routing\");\n- table.addCell(\"searchRouting\", \"desc:search routing\");\n+ table.addCell(\"alias\", \"alias:a;desc:alias name\");\n+ table.addCell(\"index\", \"alias:i,idx;desc:index alias points to\");\n+ table.addCell(\"filter\", \"alias:f,fi;desc:filter\");\n+ table.addCell(\"routing.index\", \"alias:ri,routingIndex;desc:index routing\");\n+ table.addCell(\"routing.search\", \"alias:rs,routingSearch;desc:search routing\");\n table.endHeaders();\n return table;\n }", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java", "status": "modified" }, { "diff": "@@ -114,14 +114,14 @@ public void onFailure(Throwable e) {\n Table getTableWithHeader(final RestRequest request) {\n final Table table = new Table();\n table.startHeaders();\n- table.addCell(\"shards\", \"text-align:right;desc:number of shards on node\");\n- table.addCell(\"diskUsed\", \"text-align:right;desc:disk used (total, not just ES)\");\n- table.addCell(\"diskAvail\", \"text-align:right;desc:disk available\");\n- table.addCell(\"diskTotal\", \"text-align:right;desc:total capacity of all volumes\");\n- table.addCell(\"diskPercent\", \"text-align:right;desc:percent disk used\");\n- table.addCell(\"host\", \"desc:host of node\");\n+ table.addCell(\"shards\", \"alias:s;text-align:right;desc:number of shards on node\");\n+ table.addCell(\"disk.used\", \"alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)\");\n+ table.addCell(\"disk.avail\", \"alias:da,diskAvail;text-align:right;desc:disk available\");\n+ table.addCell(\"disk.total\", \"alias:dt,diskTotal;text-align:right;desc:total capacity of all volumes\");\n+ table.addCell(\"disk.percent\", \"alias:dp,diskPercent;text-align:right;desc:percent disk used\");\n+ table.addCell(\"host\", \"alias:h;desc:host of node\");\n table.addCell(\"ip\", \"desc:ip of node\");\n- table.addCell(\"node\", \"desc:name of node\");\n+ table.addCell(\"node\", \"alias:n;desc:name of node\");\n table.endHeaders();\n return table;\n }", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java", "status": "modified" }, { "diff": "@@ -99,9 +99,9 @@ public void onFailure(Throwable t) {\n Table getTableWithHeader(final RestRequest request) {\n Table table = new Table();\n table.startHeaders();\n- table.addCell(\"epoch\", \"desc:seconds since 1970-01-01 00:00:00, that the count was executed\");\n- table.addCell(\"timestamp\", \"desc:time that the count was executed\");\n- table.addCell(\"count\", \"desc:the document count\");\n+ table.addCell(\"epoch\", \"alias:t,time;desc:seconds since 1970-01-01 00:00:00, that the count was executed\");\n+ table.addCell(\"timestamp\", \"alias:ts,hms;desc:time that the count was executed\");\n+ table.addCell(\"count\", \"alias:dc,docs.count,docsCount;desc:the document count\");\n table.endHeaders();\n return table;\n }", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestCountAction.java", "status": "modified" }, { "diff": "@@ -82,17 +82,17 @@ public void 
onFailure(Throwable e) {\n Table getTableWithHeader(final RestRequest request) {\n Table t = new Table();\n t.startHeaders();\n- t.addCell(\"epoch\", \"desc:seconds since 1970-01-01 00:00:00, that the count was executed\");\n- t.addCell(\"timestamp\", \"desc:time that the count was executed\");\n- t.addCell(\"cluster\", \"desc:cluster name\");\n- t.addCell(\"status\", \"desc:health status\");\n- t.addCell(\"nodeTotal\", \"text-align:right;desc:total number of nodes\");\n- t.addCell(\"nodeData\", \"text-align:right;desc:number of nodes that can store data\");\n- t.addCell(\"shards\", \"text-align:right;desc:total number of shards\");\n- t.addCell(\"pri\", \"text-align:right;desc:number of primary shards\");\n- t.addCell(\"relo\", \"text-align:right;desc:number of relocating nodes\");\n- t.addCell(\"init\", \"text-align:right;desc:number of initializing nodes\");\n- t.addCell(\"unassign\", \"text-align:right;desc:number of unassigned shards\");\n+ t.addCell(\"epoch\", \"alias:t,time;desc:seconds since 1970-01-01 00:00:00\");\n+ t.addCell(\"timestamp\", \"alias:ts,hms,hhmmss;desc:time in HH:MM:SS\");\n+ t.addCell(\"cluster\", \"alias:cl;desc:cluster name\");\n+ t.addCell(\"status\", \"alias:st;desc:health status\");\n+ t.addCell(\"node.total\", \"alias:nt,nodeTotal;text-align:right;desc:total number of nodes\");\n+ t.addCell(\"node.data\", \"alias:nd,nodeData;text-align:right;desc:number of nodes that can store data\");\n+ t.addCell(\"shards\", \"alias:t,sh,shards.total,shardsTotal;text-align:right;desc:total number of shards\");\n+ t.addCell(\"pri\", \"alias:p,shards.primary,shardsPrimary;text-align:right;desc:number of primary shards\");\n+ t.addCell(\"relo\", \"alias:r,shards.relocating,shardsRelocating;text-align:right;desc:number of relocating nodes\");\n+ t.addCell(\"init\", \"alias:i,shards.initializing,shardsInitializing;text-align:right;desc:number of initializing nodes\");\n+ t.addCell(\"unassign\", \"alias:u,shards.unassigned,shardsUnassigned;text-align:right;desc:number of unassigned shards\");\n t.endHeaders();\n \n return t;", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestHealthAction.java", "status": "modified" }, { "diff": "@@ -126,12 +126,12 @@ public void onFailure(Throwable e) {\n Table getTableWithHeader(final RestRequest request) {\n Table table = new Table();\n table.startHeaders();\n- table.addCell(\"health\", \"desc:current health status\");\n- table.addCell(\"index\", \"desc:index name\");\n- table.addCell(\"pri\", \"text-align:right;desc:number of primary shards\");\n- table.addCell(\"rep\", \"text-align:right;desc:number of replica shards\");\n- table.addCell(\"docs.count\", \"alias:docsCount;text-align:right;desc:available docs\");\n- table.addCell(\"docs.deleted\", \"alias:docsDeleted;text-align:right;desc:deleted docs\");\n+ table.addCell(\"health\", \"alias:h;desc:current health status\");\n+ table.addCell(\"index\", \"alias:i,idx;desc:index name\");\n+ table.addCell(\"pri\", \"alias:p,shards.primary,shardsPrimary;text-align:right;desc:number of primary shards\");\n+ table.addCell(\"rep\", \"alias:r,shards.replica,shardsReplica;text-align:right;desc:number of replica shards\");\n+ table.addCell(\"docs.count\", \"alias:dc,docsCount;text-align:right;desc:available docs\");\n+ table.addCell(\"docs.deleted\", \"alias:dd,docsDeleted;text-align:right;desc:deleted docs\");\n \n table.addCell(\"store.size\", \"sibling:pri;alias:ss,storeSize;text-align:right;desc:store size of primaries & replicas\");\n table.addCell(\"pri.store.size\", 
\"text-align:right;desc:store size of primaries\");", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java", "status": "modified" }, { "diff": "@@ -84,9 +84,9 @@ Table getTableWithHeader(final RestRequest request) {\n Table table = new Table();\n table.startHeaders()\n .addCell(\"id\", \"desc:node id\")\n- .addCell(\"host\", \"desc:host name\")\n+ .addCell(\"host\", \"alias:h;desc:host name\")\n .addCell(\"ip\", \"desc:ip address \")\n- .addCell(\"node\", \"desc:node name\")\n+ .addCell(\"node\", \"alias:n;desc:node name\")\n .endHeaders();\n return table;\n }", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java", "status": "modified" }, { "diff": "@@ -124,30 +124,26 @@ public void onFailure(Throwable e) {\n Table getTableWithHeader(final RestRequest request) {\n Table table = new Table();\n table.startHeaders();\n- table.addCell(\"nodeId\", \"default:false;desc:unique node id\");\n- table.addCell(\"pid\", \"default:false;desc:process id\");\n- table.addCell(\"host\", \"desc:host name\");\n- table.addCell(\"ip\", \"desc:ip address\");\n- table.addCell(\"port\", \"default:false;desc:bound transport port\");\n+ table.addCell(\"id\", \"default:false;alias:id,nodeId;desc:unique node id\");\n+ table.addCell(\"pid\", \"default:false;alias:p;desc:process id\");\n+ table.addCell(\"host\", \"alias:h;desc:host name\");\n+ table.addCell(\"ip\", \"alias:i;desc:ip address\");\n+ table.addCell(\"port\", \"default:false;alias:po;desc:bound transport port\");\n \n table.addCell(\"version\", \"default:false;alias:v;desc:es version\");\n table.addCell(\"build\", \"default:false;alias:b;desc:es build hash\");\n- table.addCell(\"jdk\", \"default:false;desc:jdk version\");\n- table.addCell(\"diskAvail\", \"default:false;text-align:right;desc:available disk space\");\n- table.addCell(\"heapPercent\", \"text-align:right;desc:used heap ratio\");\n- table.addCell(\"heapMax\", \"default:false;text-align:right;desc:max configured heap\");\n- table.addCell(\"ramPercent\", \"text-align:right;desc:used machine memory ratio\");\n- table.addCell(\"ramMax\", \"default:false;text-align:right;desc:total machine memory\");\n-\n- table.addCell(\"fielddata\", \"default:false;text-align:right;desc:used fielddata cache\");\n- table.addCell(\"filter\", \"default:false;text-align:right;desc:used filter cache\");\n- table.addCell(\"idCache\", \"default:false;text-align:right;desc:used id cache\");\n-\n- table.addCell(\"load\", \"text-align:right;desc:most recent load avg\");\n- table.addCell(\"uptime\", \"default:false;text-align:right;desc:node uptime\");\n- table.addCell(\"data/client\", \"desc:d:data node, c:client node\");\n- table.addCell(\"master\", \"desc:m:master-eligible, *:current master\");\n- table.addCell(\"name\", \"desc:node name\");\n+ table.addCell(\"jdk\", \"default:false;alias:j;desc:jdk version\");\n+ table.addCell(\"disk.avail\", \"default:false;alias:d,disk,diskAvail;text-align:right;desc:available disk space\");\n+ table.addCell(\"heap.percent\", \"alias:hp,heapPercent;text-align:right;desc:used heap ratio\");\n+ table.addCell(\"heap.max\", \"default:false;alias:hm,heapMax;text-align:right;desc:max configured heap\");\n+ table.addCell(\"ram.percent\", \"alias:rp,ramPercent;text-align:right;desc:used machine memory ratio\");\n+ table.addCell(\"ram.max\", \"default:false;alias:rm,ramMax;text-align:right;desc:total machine memory\");\n+\n+ table.addCell(\"load\", \"alias:l;text-align:right;desc:most recent load avg\");\n+ table.addCell(\"uptime\", 
\"default:false;alias:u;text-align:right;desc:node uptime\");\n+ table.addCell(\"node.role\", \"alias:r,role,dc,nodeRole;desc:d:data node, c:client node\");\n+ table.addCell(\"master\", \"alias:m;desc:m:master-eligible, *:current master\");\n+ table.addCell(\"name\", \"alias:n;desc:node name\");\n \n table.addCell(\"completion.size\", \"alias:cs,completionSize;default:false;text-align:right;desc:size of completion\");\n \n@@ -241,10 +237,6 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR\n table.addCell(stats == null ? null : stats.getOs().mem() == null ? null : stats.getOs().mem().usedPercent());\n table.addCell(info == null ? null : info.getOs().mem() == null ? null : info.getOs().mem().total()); // sigar fails to load in IntelliJ\n \n- table.addCell(stats == null ? null : stats.getIndices().getFieldData().getMemorySize());\n- table.addCell(stats == null ? null : stats.getIndices().getFilterCache().getMemorySize());\n- table.addCell(stats == null ? null : stats.getIndices().getIdCache().getMemorySize());\n-\n table.addCell(stats == null ? null : stats.getOs() == null ? null : stats.getOs().getLoadAverage().length < 1 ? null : String.format(Locale.ROOT, \"%.2f\", stats.getOs().getLoadAverage()[0]));\n table.addCell(stats == null ? null : stats.getJvm().uptime());\n table.addCell(node.clientNode() ? \"c\" : node.dataNode() ? \"d\" : \"-\");", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java", "status": "modified" }, { "diff": "@@ -77,10 +77,10 @@ public void onFailure(Throwable e) {\n Table getTableWithHeader(final RestRequest request) {\n Table t = new Table();\n t.startHeaders();\n- t.addCell(\"insertOrder\", \"text-align:right;desc:task insertion order\");\n- t.addCell(\"timeInQueue\", \"text-align:right;desc:how long task has been in queue\");\n- t.addCell(\"priority\", \"desc:task priority\");\n- t.addCell(\"source\", \"desc:task source\");\n+ t.addCell(\"insertOrder\", \"alias:o;text-align:right;desc:task insertion order\");\n+ t.addCell(\"timeInQueue\", \"alias:t;text-align:right;desc:how long task has been in queue\");\n+ t.addCell(\"priority\", \"alias:p;desc:task priority\");\n+ t.addCell(\"source\", \"alias:s;desc:task source\");\n t.endHeaders();\n return t;\n }", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java", "status": "modified" }, { "diff": "@@ -134,14 +134,14 @@ public void onFailure(Throwable e) {\n @Override\n Table getTableWithHeader(RestRequest request) {\n Table t = new Table();\n- t.startHeaders().addCell(\"index\", \"desc:index name\")\n- .addCell(\"shard\", \"desc:shard name\")\n- .addCell(\"target\", \"text-align:right;desc:bytes of source shard\")\n- .addCell(\"recovered\", \"text-align:right;desc:bytes recovered so far\")\n- .addCell(\"%\", \"text-align:right;desc:percent recovered so far\")\n- .addCell(\"host\", \"desc:node host where source shard lives\")\n+ t.startHeaders().addCell(\"index\", \"alias:i,idx;desc:index name\")\n+ .addCell(\"shard\", \"alias:s,sh;desc:shard name\")\n+ .addCell(\"target\", \"alias:t;text-align:right;desc:bytes of source shard\")\n+ .addCell(\"recovered\", \"alias:r;text-align:right;desc:bytes recovered so far\")\n+ .addCell(\"percent\", \"alias:per,ratio;text-align:right;desc:percent recovered so far\")\n+ .addCell(\"host\", \"alias:h;desc:node host where source shard lives\")\n .addCell(\"ip\", \"desc:node ip where source shard lives\")\n- .addCell(\"node\", \"desc:node name where source shard lives\")\n+ 
.addCell(\"node\", \"alias:n;desc:node name where source shard lives\")\n .endHeaders();\n return t;\n }", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java", "status": "modified" }, { "diff": "@@ -105,14 +105,14 @@ public void onFailure(Throwable e) {\n Table getTableWithHeader(final RestRequest request) {\n Table table = new Table();\n table.startHeaders()\n- .addCell(\"index\", \"default:true;desc:index name\")\n- .addCell(\"shard\", \"default:true;desc:shard name\")\n- .addCell(\"prirep\", \"alias:pr,primaryOrReplica;default:true;desc:primary or replica\")\n- .addCell(\"state\", \"default:true;desc:shard state\")\n- .addCell(\"docs\", \"text-align:right;desc:number of docs in shard\")\n- .addCell(\"store\", \"text-align:right;desc:store size of shard (how much disk it uses)\")\n+ .addCell(\"index\", \"default:true;alias:i,idx;desc:index name\")\n+ .addCell(\"shard\", \"default:true;alias:s,sh;desc:shard name\")\n+ .addCell(\"prirep\", \"alias:p,pr,primaryOrReplica;default:true;desc:primary or replica\")\n+ .addCell(\"state\", \"default:true;alias:st;desc:shard state\")\n+ .addCell(\"docs\", \"alias:d,dc;text-align:right;desc:number of docs in shard\")\n+ .addCell(\"store\", \"alias:sto;text-align:right;desc:store size of shard (how much disk it uses)\")\n .addCell(\"ip\", \"default:true;desc:ip of node where it lives\")\n- .addCell(\"node\", \"default:true;desc:name of node where it lives\");\n+ .addCell(\"node\", \"default:true;alias:n;desc:name of node where it lives\");\n \n table.addCell(\"completion.size\", \"alias:cs,completionSize;default:false;text-align:right;desc:size of completion\");\n ", "filename": "src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java", "status": "modified" } ] }
{ "body": "I mistakenly tried to allocate a shard on a node which is not a data-node and this resulted in a cryptical `NullPointerException`. We should throw a meaningful exception instead.\n", "comments": [ { "body": "hey this should go into the 1.0 release branch as well right?\n", "created_at": "2014-01-21T16:11:58Z" }, { "body": "I was just going to ask about it. :-)\n", "created_at": "2014-01-21T16:12:24Z" } ], "number": 4833, "title": "Allocation on a non-data node causes a NPE" }
{ "body": "Today, it would fail with a NullPointerException.\n\nClose #4833\n", "number": 4834, "review_comments": [], "title": "Throw an ElasticsearchIllegalArgumentException when allocating on a non-data node." }
{ "commits": [ { "message": "Throw an ElasticsearchIllegalArgumentException when allocating on a non-data node.\n\nToday, it would fail with a NullPointerException.\n\nClose #4833" } ], "files": [ { "diff": "@@ -21,6 +21,7 @@\n \n import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.ElasticsearchIllegalArgumentException;\n+import org.elasticsearch.ElasticsearchIllegalStateException;\n import org.elasticsearch.ElasticsearchParseException;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.cluster.routing.MutableShardRouting;\n@@ -183,6 +184,14 @@ public void execute(RoutingAllocation allocation) throws ElasticsearchException\n }\n \n RoutingNode routingNode = allocation.routingNodes().node(discoNode.id());\n+ if (routingNode == null) {\n+ if (!discoNode.dataNode()) {\n+ throw new ElasticsearchIllegalArgumentException(\"Allocation can only be done on data nodes, not [\" + node + \"]\");\n+ } else {\n+ throw new ElasticsearchIllegalStateException(\"Could not find [\" + node + \"] among the routing nodes\");\n+ }\n+ }\n+\n Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);\n if (decision.type() == Decision.Type.NO) {\n throw new ElasticsearchIllegalArgumentException(\"[allocate] allocation of \" + shardId + \" on node \" + discoNode + \" is not allowed, reason: \" + decision);", "filename": "src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.cluster.routing.allocation;\n \n+import com.google.common.collect.ImmutableMap;\n import org.elasticsearch.ElasticsearchIllegalArgumentException;\n import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n@@ -117,6 +118,7 @@ public void allocateCommand() {\n .put(newNode(\"node1\"))\n .put(newNode(\"node2\"))\n .put(newNode(\"node3\"))\n+ .put(newNode(\"node4\", ImmutableMap.of(\"data\", Boolean.FALSE.toString())))\n ).build();\n RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);\n clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();\n@@ -129,6 +131,13 @@ public void allocateCommand() {\n } catch (ElasticsearchIllegalArgumentException e) {\n }\n \n+ logger.info(\"--> allocating to non-data node, should fail\");\n+ try {\n+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId(\"test\", 0), \"node4\", true)));\n+ fail();\n+ } catch (ElasticsearchIllegalArgumentException e) {\n+ }\n+\n logger.info(\"--> allocating with primary flag set to true\");\n rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId(\"test\", 0), \"node1\", true)));\n assertThat(rerouteResult.changed(), equalTo(true));", "filename": "src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java", "status": "modified" } ] }
{ "body": "Page tracking would help make sure that we never forget to release pages when we don't need them anymore.\n", "comments": [], "number": 4814, "title": "Add tracking of pages to MockPageCacheRecycler" }
{ "body": "This found an issue in BytesRefHash that forgot to release the start offsets.\n\nClose #4814\n", "number": 4815, "review_comments": [], "title": "Add page tracking to MockPageCacheRecycler." }
{ "commits": [ { "message": "Add page tracking to MockPageCacheRecycler.\n\nThis found an issue in BytesRefHash that forgot to release the start offsets.\n\nClose #4814" } ], "files": [ { "diff": "@@ -162,7 +162,7 @@ public boolean release() {\n super.release();\n success = true;\n } finally {\n- Releasables.release(success, bytes, hashes);\n+ Releasables.release(success, bytes, hashes, startOffsets);\n }\n return true;\n }", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/BytesRefHash.java", "status": "modified" }, { "diff": "@@ -19,16 +19,29 @@\n \n package org.elasticsearch.cache.recycler;\n \n+import com.google.common.collect.Maps;\n+import org.elasticsearch.ElasticsearchException;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.recycler.Recycler.V;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.test.TestCluster;\n import org.elasticsearch.threadpool.ThreadPool;\n \n import java.util.Random;\n+import java.util.concurrent.ConcurrentMap;\n \n public class MockPageCacheRecycler extends PageCacheRecycler {\n \n+ private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = Maps.newConcurrentMap();\n+\n+ public static void ensureAllPagesAreReleased() {\n+ if (ACQUIRED_PAGES.size() > 0) {\n+ final Throwable t = ACQUIRED_PAGES.entrySet().iterator().next().getValue();\n+ throw new RuntimeException(ACQUIRED_PAGES.size() + \" pages have not been released\", t);\n+ }\n+ ACQUIRED_PAGES.clear();\n+ }\n+\n private final Random random;\n \n @Inject\n@@ -38,13 +51,45 @@ public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {\n random = new Random(seed);\n }\n \n+ private static <T> V<T> wrap(final V<T> v) {\n+ ACQUIRED_PAGES.put(v, new Throwable());\n+ final Thread t = Thread.currentThread();\n+ return new V<T>() {\n+\n+ @Override\n+ public boolean release() throws ElasticsearchException {\n+ if (t != Thread.currentThread()) {\n+ // Releasing from a different thread doesn't break anything but this is bad practice as pages should be acquired\n+ // as late as possible and released as soon as possible in a try/finally fashion\n+ throw new RuntimeException(\"Page was allocated in \" + t + \" but released in \" + Thread.currentThread());\n+ }\n+ final Throwable t = ACQUIRED_PAGES.remove(v);\n+ if (t == null) {\n+ throw new IllegalStateException(\"Releasing a page that has not been acquired\");\n+ }\n+ return v.release();\n+ }\n+\n+ @Override\n+ public T v() {\n+ return v.v();\n+ }\n+\n+ @Override\n+ public boolean isRecycled() {\n+ return v.isRecycled();\n+ }\n+\n+ };\n+ }\n+\n @Override\n public V<byte[]> bytePage(boolean clear) {\n final V<byte[]> page = super.bytePage(clear);\n if (!clear) {\n random.nextBytes(page.v());\n }\n- return page;\n+ return wrap(page);\n }\n \n @Override\n@@ -55,7 +100,7 @@ public V<int[]> intPage(boolean clear) {\n page.v()[i] = random.nextInt();\n }\n }\n- return page;\n+ return wrap(page);\n }\n \n @Override\n@@ -66,7 +111,7 @@ public V<long[]> longPage(boolean clear) {\n page.v()[i] = random.nextLong();\n }\n }\n- return page;\n+ return wrap(page);\n }\n \n @Override\n@@ -77,7 +122,12 @@ public V<double[]> doublePage(boolean clear) {\n page.v()[i] = random.nextDouble() - 0.5;\n }\n }\n- return page;\n+ return wrap(page);\n+ }\n+\n+ @Override\n+ public V<Object[]> objectPage() {\n+ return wrap(super.objectPage());\n }\n \n }", "filename": "src/test/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java", "status": "modified" }, { "diff": "@@ -36,6 
+36,9 @@ public class BytesRefHashTests extends ElasticsearchTestCase {\n BytesRefHash hash;\n \n private void newHash() {\n+ if (hash != null) {\n+ hash.release();\n+ }\n // Test high load factors to make sure that collision resolution works fine\n final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;\n hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randomCacheRecycler());\n@@ -48,7 +51,8 @@ public void setUp() throws Exception {\n }\n \n public void testDuell() {\n- final BytesRef[] values = new BytesRef[randomIntBetween(1, 100000)];\n+ final int len = randomIntBetween(1, 100000);\n+ final BytesRef[] values = new BytesRef[len];\n for (int i = 0; i < values.length; ++i) {\n values[i] = new BytesRef(randomAsciiOfLength(5));\n }\n@@ -111,6 +115,7 @@ public void testSize() {\n }\n }\n }\n+ hash.release();\n }\n \n /**\n@@ -150,6 +155,7 @@ public void testGet() {\n }\n newHash();\n }\n+ hash.release();\n }\n \n /**\n@@ -190,6 +196,7 @@ public void testAdd() {\n assertAllIn(strings, hash);\n newHash();\n }\n+ hash.release();\n }\n \n @Test\n@@ -225,6 +232,7 @@ public void testFind() throws Exception {\n assertAllIn(strings, hash);\n newHash();\n }\n+ hash.release();\n }\n \n private void assertAllIn(Set<String> strings, BytesRefHash hash) {", "filename": "src/test/java/org/elasticsearch/search/aggregations/bucket/BytesRefHashTests.java", "status": "modified" }, { "diff": "@@ -27,13 +27,15 @@\n import org.apache.lucene.util.LuceneTestCase;\n import org.apache.lucene.util.TimeUnits;\n import org.elasticsearch.Version;\n+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;\n import org.elasticsearch.common.logging.ESLogger;\n import org.elasticsearch.common.logging.Loggers;\n import org.elasticsearch.common.util.concurrent.EsAbortPolicy;\n import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;\n import org.elasticsearch.test.engine.MockInternalEngine;\n import org.elasticsearch.test.junit.listeners.LoggingListener;\n import org.elasticsearch.test.store.MockDirectoryHelper;\n+import org.junit.After;\n import org.junit.AfterClass;\n import org.junit.BeforeClass;\n \n@@ -109,6 +111,11 @@ public File getResource(String relativePath) {\n return new File(uri);\n }\n \n+ @After\n+ public void ensureAllPagesReleased() {\n+ MockPageCacheRecycler.ensureAllPagesAreReleased();\n+ }\n+\n public static void ensureAllFilesClosed() throws IOException {\n try {\n for (MockDirectoryHelper.ElasticsearchMockDirectoryWrapper w : MockDirectoryHelper.wrappers) {", "filename": "src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java", "status": "modified" } ] }
{ "body": "Steps to reproduce:\n1. Fix the transport.port and http.port to fixed values.\n2. Start another process that is using port 9300\n3. Register ElasticSearch service\n4. Try and start ElasticSearch using services\n\nObserved behavior:\nThere should be a bindException in elasticsearch.log file.\n\nActual behavior:\nThere is no error in elasticsearch.log file, but elasticsearch process does not start\n\nMore notes:\nIn Bootstrap class, this seems incorrect check.\n if (foreground) {\n logger.error(errorMessage);\n } else {\n System.err.println(errorMessage);\n System.err.flush();\n }\n", "comments": [ { "body": "Hey,\n\njust to make sure I get this right: What do you mean with register elasticsearch service? Where and how do you reigster it? Which elasticsearch version are you using and which operating system?\n\nSeems the if-else order should be reversed, but want to make sure I get all information first.\n", "created_at": "2014-01-20T11:23:51Z" }, { "body": "Hi Alex,\n\nThat is what I thought too that the if-else should be reversed.\nWhen I said register, I mean install the service. Install is currently\nsupported (0.90.9) only on Windows OOTB, and I was trying on a Windows 7\nlaptop (64-bit).\n\nRegards,\nSameer\n\nOn Mon, Jan 20, 2014 at 4:54 PM, Alexander Reelsen <notifications@github.com\n\n> wrote:\n> \n> Hey,\n> \n> just to make sure I get this right: What do you mean with register\n> elasticsearch service? Where and how do you reigster it? Which\n> elasticsearch version are you using and which operating system?\n> \n> Seems the if-else order should be reversed, but want to make sure I get\n> all information first.\n> \n> —\n> Reply to this email directly or view it on GitHubhttps://github.com/elasticsearch/elasticsearch/issues/4805#issuecomment-32751814\n> .\n", "created_at": "2014-01-20T16:54:10Z" }, { "body": "Hey,\n\ncan you see, if the above patch fixes the issue for you? Would be great! You can either use the master or the 0.90 branch to test.\n\nThanks a lot!\n", "created_at": "2014-01-21T08:24:13Z" }, { "body": "Sure, I have yet to use the ES build process to deploy, but I can test on\n0.90 branch.\n\nThanks and regards,\nSameer\n\nOn Tue, Jan 21, 2014 at 1:54 PM, Alexander Reelsen <notifications@github.com\n\n> wrote:\n> \n> Hey,\n> \n> can you see, if the above patch fixes the issue for you? Would be great!\n> You can either use the master or the 0.90 branch to test.\n> \n> Thanks a lot!\n> \n> —\n> Reply to this email directly or view it on GitHubhttps://github.com/elasticsearch/elasticsearch/issues/4805#issuecomment-32828902\n> .\n", "created_at": "2014-01-21T09:44:35Z" }, { "body": "I tried on 0.90 branch, and it is now updating the error in\nelasticsearch.log file, but service console does not give me an error\nduring startup. I have to refresh the list to see that the ES process did\nnot start. I used to see the error earlier that the ES process did not\nstart.\n\nRegards,\nSameer\n\nOn Tue, Jan 21, 2014 at 1:54 PM, Alexander Reelsen <notifications@github.com\n\n> wrote:\n> \n> Hey,\n> \n> can you see, if the above patch fixes the issue for you? 
Would be great!\n> You can either use the master or the 0.90 branch to test.\n> \n> Thanks a lot!\n> \n> —\n> Reply to this email directly or view it on GitHubhttps://github.com/elasticsearch/elasticsearch/issues/4805#issuecomment-32828902\n> .\n", "created_at": "2014-01-21T11:54:05Z" }, { "body": "Hi Sameer,\n\nI've tried reproducing the issue and discovered the following - In Windows, under Administrative Tasks > Services:\n1.starting a service gives an error only if ES is configured incorrectly - namely if the service wrapper x86 is configured to use a JVM on 64 bits.\n2. in all other cases, when the service wrapper is properly configured (namely elasticsearch-service-x64 is using a 64-bits JVM and service-32 a 32-bits JVM) there is no error message if the service fails. That is because the service invocation starts and the failure occurs in a background thread.\n\nI've tried this on 0.90.5 to 0.90.10 and in all cases, both 1 and 2 apply.\nTried on Windows 7 64 bit SP1.\n", "created_at": "2014-01-21T16:53:49Z" }, { "body": "Yes, that does make sense that there is no error because the error is in\nanother thread. Maybe, a status check wrapper is required which will be\nused instead of starting ES directly, so that the user gets feedback of\nwhen the service fails to start up.\n\nThanks and regards,\nSameer\n\nOn Tue, Jan 21, 2014 at 10:24 PM, Costin Leau notifications@github.comwrote:\n\n> Hi Sameer,\n> \n> I've tried reproducing the issue and discovered the following - In\n> Windows, under Administrative Tasks > Services:\n> 1.starting a service gives an error only if ES is configured incorrectly -\n> namely if the service wrapper x86 is configured to use a JVM on 64 bits.\n> 2. in all other cases, when the service wrapper is properly configured\n> (namely elasticsearch-service-x64 is using a 64-bits JVM and service-32 a\n> 32-bits JVM) there is no error message if the service fails. That is\n> because the service invocation starts and the failure occurs in a\n> background thread.\n> \n> I've tried this on 0.90.5 to 0.90.10 and in all cases, both 1 and 2 apply.\n> Tried on Windows 7 64 bit SP1.\n> \n> —\n> Reply to this email directly or view it on GitHubhttps://github.com/elasticsearch/elasticsearch/issues/4805#issuecomment-32904453\n> .\n", "created_at": "2014-01-22T03:23:35Z" }, { "body": "I agree that would be nice but I must say I'm unsure where there the problem lies.\nIf you start the service from the command-line using `service.bat start`, you'll\nget the failure message right away.\nHowever, the same service using the same infrastructure behaves differently -\nit seems to be a difference in behavior of commons-daemon rather then ES itself.\n\nI'll dig deeper to figure out what the issue but so far nothing jumped out.\nIn the meantime I recommend using service.bat start|stop.\n\nHope this helps...\n\nOn 22/01/2014 5:24 AM, sameerpokarna wrote:\n\n> Yes, that does make sense that there is no error because the error is in\n> another thread. 
Maybe, a status check wrapper is required which will be\n> used instead of starting ES directly, so that the user gets feedback of\n> when the service fails to start up.\n> \n> Thanks and regards,\n> Sameer\n> \n> On Tue, Jan 21, 2014 at 10:24 PM, Costin Leau notifications@github.comwrote:\n> \n> > Hi Sameer,\n> > \n> > I've tried reproducing the issue and discovered the following - In\n> > Windows, under Administrative Tasks > Services:\n> > 1.starting a service gives an error only if ES is configured incorrectly -\n> > namely if the service wrapper x86 is configured to use a JVM on 64 bits.\n> > 2. in all other cases, when the service wrapper is properly configured\n> > (namely elasticsearch-service-x64 is using a 64-bits JVM and service-32 a\n> > 32-bits JVM) there is no error message if the service fails. That is\n> > because the service invocation starts and the failure occurs in a\n> > background thread.\n> > \n> > I've tried this on 0.90.5 to 0.90.10 and in all cases, both 1 and 2 apply.\n> > Tried on Windows 7 64 bit SP1.\n> > \n> > —\n> > Reply to this email directly or view it on GitHubhttps://github.com/elasticsearch/elasticsearch/issues/4805#issuecomment-32904453\n> > .\n> \n> —\n> Reply to this email directly or view it on GitHub\n> https://github.com/elasticsearch/elasticsearch/issues/4805#issuecomment-32990302.\n\n## \n\nCostin\n", "created_at": "2014-01-22T14:22:10Z" } ], "number": 4805, "title": "BindException not occuring in elasticserch.log when started as a service" }
{ "body": "If elasticsearch was started in the foreground an immediate exit on startup\nled to logging in the logfile, where as when starting in the background,\nan immediate exit logged to stdout.\n\nCloses #4805\n", "number": 4812, "review_comments": [], "title": "Fix logging on immediate exit on start" }
{ "commits": [ { "message": "Fix logging on immediate exit on start\n\nIf elasticsearch was started in the foreground an immediate exit on startup\nled to logging in the logfile, where as when starting in the background,\nan immediate exit logged to stdout.\n\nCloses #4805" } ], "files": [ { "diff": "@@ -235,10 +235,10 @@ public void run() {\n }\n String errorMessage = buildErrorMessage(stage, e);\n if (foreground) {\n- logger.error(errorMessage);\n- } else {\n System.err.println(errorMessage);\n System.err.flush();\n+ } else {\n+ logger.error(errorMessage);\n }\n Loggers.disableConsoleLogging();\n if (logger.isDebugEnabled()) {", "filename": "src/main/java/org/elasticsearch/bootstrap/Bootstrap.java", "status": "modified" } ] }
{ "body": "AbstractFieldMapper.merge should return a conflict when trying to enable or disable norms on an existing mapping.\n", "comments": [], "number": 4761, "title": "AbstractFieldMapper.merge doesn't return conflicts when trying to enable or disable norms" }
{ "body": "Close #4761\n", "number": 4811, "review_comments": [], "title": "Return a conflict when trying to enable/disable norms." }
{ "commits": [ { "message": "Return a conflict when trying to enable/disable norms.\n\nClose #4761" } ], "files": [ { "diff": "@@ -561,6 +561,9 @@ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappi\n // when the doc_values field data format is configured\n mergeContext.addConflict(\"mapper [\" + names.fullName() + \"] has different \" + TypeParsers.DOC_VALUES + \" values\");\n }\n+ if (this.fieldType().omitNorms() != fieldMergeWith.fieldType.omitNorms()) {\n+ mergeContext.addConflict(\"mapper [\" + names.fullName() + \"] has different `norms.enabled` values\");\n+ }\n if (this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) {\n mergeContext.addConflict(\"mapper [\" + names.fullName() + \"] has different tokenize values\");\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java", "status": "modified" }, { "diff": "@@ -182,6 +182,16 @@ public void updateMappingWithConflicts() throws Exception {\n assertThat(putMappingResponse.isAcknowledged(), equalTo(true));\n }\n \n+ @Test(expected = MergeMappingException.class)\n+ public void updateMappingWithNormsConflicts() throws Exception {\n+ client().admin().indices().prepareCreate(\"test\")\n+ .addMapping(\"type\", \"{\\\"type\\\":{\\\"properties\\\":{\\\"body\\\":{\\\"type\\\":\\\"string\\\", \\\"norms\\\": { \\\"enabled\\\": false }}}}}\")\n+ .execute().actionGet();\n+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(\"test\").setType(\"type\")\n+ .setSource(\"{\\\"type\\\":{\\\"properties\\\":{\\\"body\\\":{\\\"type\\\":\\\"string\\\", \\\"norms\\\": { \\\"enabled\\\": true }}}}}\")\n+ .execute().actionGet();\n+ }\n+\n /*\n First regression test for https://github.com/elasticsearch/elasticsearch/issues/3381\n */", "filename": "src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java", "status": "modified" } ] }
{ "body": "We compare the field type against the default one in `toXContent` in order to only serialize changes from the default field type, which has `norms.enabled: true`.\n\nHowever we also have some logic to omit norms in case norms have not been configured and the field is `not_analyzed`.\n\nThis means that if you configure a field to be `not_analyzed` and have norms enabled:\n\n``` javascript\n{\n \"type\": string,\n \"index\": \"not_analyzed\",\n \"norms\": {\n \"enabled\": true\n }\n}\n```\n\nit will be parsed correctly, but if you serialize it with toXContent, you will get:\n\n``` javascript\n{\n \"type\": \"string\",\n \"index\": \"not_analyzed\"\n}\n```\n\n`norms.enabled` are missing because they are the same as in the default field type. So parsing it again would return a field which has norms disabled.\n\nThe same is true for `index_options` (docs, positions and offsets by default, docs_only in case of `not_analyzed` fields) but this is less of an issue given that it doesn't make sense to index offsets on a not_analyzed field.\n", "comments": [], "number": 4760, "title": "norms.enabled/omit_norms serialization and parsing are inconsistent" }
{ "body": "StringFieldMapper.toXContent uses the defaults for analyzed fields in order to\nknow which options to add to the builder. This means that if the field is not\nanalyzed and has norms enabled, it will omit to emit `norms.enabled: true`.\nParsing the mapping again will result in a StringFieldMapper that has norms\ndisabled.\n\nThe same fix applies to index options.\n\nClose #4760\n", "number": 4810, "review_comments": [], "title": "Make StringFieldMapper.toXContent aware of defaults for not_analyzed fields" }
{ "commits": [ { "message": "Make StringFieldMapper.toXContent aware of defaults for not_analyzed fields.\n\nStringFieldMapper.toXContent uses the defaults for analyzed fields in order to\nknow which options to add to the builder. This means that if the field is not\nanalyzed and has norms enabled, it will omit to emit `norms.enabled: true`.\nParsing the mapping again will result in a StringFieldMapper that has norms\ndisabled.\n\nThe same fix applies to index options.\n\nClose #4760" } ], "files": [ { "diff": "@@ -126,16 +126,22 @@ public StringFieldMapper build(BuilderContext context) {\n // if the field is not analyzed, then by default, we should omit norms and have docs only\n // index options, as probably what the user really wants\n // if they are set explicitly, we will use those values\n+ // we also change the values on the default field type so that toXContent emits what\n+ // differs from the defaults\n+ FieldType defaultFieldType = new FieldType(Defaults.FIELD_TYPE);\n if (fieldType.indexed() && !fieldType.tokenized()) {\n+ defaultFieldType.setOmitNorms(true);\n+ defaultFieldType.setIndexOptions(IndexOptions.DOCS_ONLY);\n if (!omitNormsSet && boost == Defaults.BOOST) {\n fieldType.setOmitNorms(true);\n }\n if (!indexOptionsSet) {\n fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);\n }\n }\n+ defaultFieldType.freeze();\n StringFieldMapper fieldMapper = new StringFieldMapper(buildNames(context),\n- boost, fieldType, docValues, nullValue, indexAnalyzer, searchAnalyzer, searchQuotedAnalyzer,\n+ boost, fieldType, defaultFieldType, docValues, nullValue, indexAnalyzer, searchAnalyzer, searchQuotedAnalyzer,\n positionOffsetGap, ignoreAbove, postingsProvider, docValuesProvider, similarity, normsLoading, \n fieldDataSettings, context.indexSettings(), multiFieldsBuilder.build(this, context));\n fieldMapper.includeInAll(includeInAll);\n@@ -183,16 +189,13 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n }\n \n private String nullValue;\n-\n private Boolean includeInAll;\n-\n private int positionOffsetGap;\n-\n private NamedAnalyzer searchQuotedAnalyzer;\n-\n private int ignoreAbove;\n+ private final FieldType defaultFieldType;\n \n- protected StringFieldMapper(Names names, float boost, FieldType fieldType, Boolean docValues,\n+ protected StringFieldMapper(Names names, float boost, FieldType fieldType,FieldType defaultFieldType, Boolean docValues,\n String nullValue, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer,\n NamedAnalyzer searchQuotedAnalyzer, int positionOffsetGap, int ignoreAbove,\n PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat,\n@@ -203,6 +206,7 @@ protected StringFieldMapper(Names names, float boost, FieldType fieldType, Boole\n if (fieldType.tokenized() && fieldType.indexed() && hasDocValues()) {\n throw new MapperParsingException(\"Field [\" + names.fullName() + \"] cannot be analyzed and have doc values\");\n }\n+ this.defaultFieldType = defaultFieldType;\n this.nullValue = nullValue;\n this.positionOffsetGap = positionOffsetGap;\n this.searchQuotedAnalyzer = searchQuotedAnalyzer != null ? 
searchQuotedAnalyzer : this.searchAnalyzer;\n@@ -211,7 +215,7 @@ protected StringFieldMapper(Names names, float boost, FieldType fieldType, Boole\n \n @Override\n public FieldType defaultFieldType() {\n- return Defaults.FIELD_TYPE;\n+ return defaultFieldType;\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java", "status": "modified" }, { "diff": "@@ -19,11 +19,14 @@\n \n package org.elasticsearch.index.mapper.string;\n \n+import com.google.common.collect.ImmutableMap;\n import org.apache.lucene.index.FieldInfo;\n import org.apache.lucene.index.FieldInfo.DocValuesType;\n import org.apache.lucene.index.IndexableField;\n+import org.apache.lucene.index.IndexableFieldType;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.fielddata.FieldDataType;\n import org.elasticsearch.index.mapper.*;\n@@ -74,6 +77,35 @@ public void testLimit() throws Exception {\n assertThat(doc.rootDoc().getField(\"field\"), nullValue());\n }\n \n+ private void assertDefaultAnalyzedFieldType(IndexableFieldType fieldType) {\n+ assertThat(fieldType.omitNorms(), equalTo(false));\n+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS));\n+ assertThat(fieldType.storeTermVectors(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));\n+ }\n+\n+ private void assertEquals(IndexableFieldType ft1, IndexableFieldType ft2) {\n+ assertEquals(ft1.indexed(), ft2.indexed());\n+ assertEquals(ft1.tokenized(), ft2.tokenized());\n+ assertEquals(ft1.omitNorms(), ft2.omitNorms());\n+ assertEquals(ft1.indexOptions(), ft2.indexOptions());\n+ assertEquals(ft1.storeTermVectors(), ft2.storeTermVectors());\n+ assertEquals(ft1.docValueType(), ft2.docValueType());\n+ }\n+\n+ private void assertParseIdemPotent(IndexableFieldType expected, DocumentMapper mapper) throws Exception {\n+ String mapping = mapper.toXContent(XContentFactory.jsonBuilder().startObject(), new ToXContent.MapParams(ImmutableMap.<String, String>of())).endObject().string();\n+ mapper = MapperTestUtils.newParser().parse(mapping);\n+ ParsedDocument doc = mapper.parse(\"type\", \"1\", XContentFactory.jsonBuilder()\n+ .startObject()\n+ .field(\"field\", \"2345\")\n+ .endObject()\n+ .bytes());\n+ assertEquals(expected, doc.rootDoc().getField(\"field\").fieldType());\n+ }\n+\n @Test\n public void testDefaultsForAnalyzed() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n@@ -88,12 +120,9 @@ public void testDefaultsForAnalyzed() throws Exception {\n .endObject()\n .bytes());\n \n- assertThat(doc.rootDoc().getField(\"field\").fieldType().omitNorms(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectors(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorOffsets(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorPositions(), equalTo(false));\n- 
assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorPayloads(), equalTo(false));\n+ IndexableFieldType fieldType = doc.rootDoc().getField(\"field\").fieldType();\n+ assertDefaultAnalyzedFieldType(fieldType);\n+ assertParseIdemPotent(fieldType, defaultMapper);\n }\n \n @Test\n@@ -110,12 +139,14 @@ public void testDefaultsForNotAnalyzed() throws Exception {\n .endObject()\n .bytes());\n \n- assertThat(doc.rootDoc().getField(\"field\").fieldType().omitNorms(), equalTo(true));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_ONLY));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectors(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorOffsets(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorPositions(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorPayloads(), equalTo(false));\n+ IndexableFieldType fieldType = doc.rootDoc().getField(\"field\").fieldType();\n+ assertThat(fieldType.omitNorms(), equalTo(true));\n+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_ONLY));\n+ assertThat(fieldType.storeTermVectors(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));\n+ assertParseIdemPotent(fieldType, defaultMapper);\n \n // now test it explicitly set\n \n@@ -131,12 +162,14 @@ public void testDefaultsForNotAnalyzed() throws Exception {\n .endObject()\n .bytes());\n \n- assertThat(doc.rootDoc().getField(\"field\").fieldType().omitNorms(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectors(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorOffsets(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorPositions(), equalTo(false));\n- assertThat(doc.rootDoc().getField(\"field\").fieldType().storeTermVectorPayloads(), equalTo(false));\n+ fieldType = doc.rootDoc().getField(\"field\").fieldType();\n+ assertThat(fieldType.omitNorms(), equalTo(false));\n+ assertThat(fieldType.indexOptions(), equalTo(FieldInfo.IndexOptions.DOCS_AND_FREQS));\n+ assertThat(fieldType.storeTermVectors(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));\n+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));\n+ assertParseIdemPotent(fieldType, defaultMapper);\n \n // also test the deprecated omit_norms\n \n@@ -152,7 +185,9 @@ public void testDefaultsForNotAnalyzed() throws Exception {\n .endObject()\n .bytes());\n \n- assertThat(doc.rootDoc().getField(\"field\").fieldType().omitNorms(), equalTo(false));\n+ fieldType = doc.rootDoc().getField(\"field\").fieldType();\n+ assertThat(fieldType.omitNorms(), equalTo(false));\n+ assertParseIdemPotent(fieldType, defaultMapper);\n }\n \n @Test", "filename": "src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java", "status": "modified" } ] }
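For illustration only (not part of the change above): a minimal sketch of the kind of mapping the fix protects — a `not_analyzed` string field that keeps norms enabled, which `toXContent` previously dropped because it compared against analyzed-field defaults. It uses `XContentFactory.jsonBuilder()` as the tests above do; the type and field names are placeholders.

```java
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class NotAnalyzedMappingSketch {
    public static void main(String[] args) throws Exception {
        // A not_analyzed string field with norms explicitly enabled; serializing and
        // re-parsing this mapping should preserve norms.enabled and index_options.
        XContentBuilder mapping = XContentFactory.jsonBuilder()
                .startObject()
                    .startObject("type")
                        .startObject("properties")
                            .startObject("field")
                                .field("type", "string")
                                .field("index", "not_analyzed")
                                .startObject("norms").field("enabled", true).endObject()
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject();
        System.out.println(mapping.string());
    }
}
```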
{ "body": "When executing a query with an empty query_string an org.elasticsearch.search.SearchParseException is raised.\n\nHow to reproduce:\n\n```\ncurl -XPUT localhost:9200/test/test/1 -d '{}'\n\ncurl -XPOST http://localhost:9200/test/test/_search -d '{ \n \"query\" : {\"query_string\":{ query: \"\"}},\n}'\n```\n\nTested with 0.90.5.\n\nResponse:\n{\"error\":\"SearchPhaseExecutionException[Failed to execute phase [query], all shards failed; shardFailures {[MkT5D2yYRgq6g73gx9141A][test][4]: SearchParseException[[test][4]: from[-1],size[-1]: Parse Failure [Failed to parse source [{\\\"query\\\" : {\\\"query_string\\\":{\\\"query\\\": \\\"\\\"}}}]]]; nested: QueryParsingException[[test] Failed to parse query []]; nested: ParseException[Cannot parse '': Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; nested: ParseException[Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; }{[MkT5D2yYRgq6g73gx9141A][test][3]: SearchParseException[[test][3]: from[-1],size[-1]: Parse Failure [Failed to parse source [{\\\"query\\\" : {\\\"query_string\\\":{\\\"query\\\": \\\"\\\"}}}]]]; nested: QueryParsingException[[test] Failed to parse query []]; nested: ParseException[Cannot parse '': Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; nested: ParseException[Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; }{[MkT5D2yYRgq6g73gx9141A][test][2]: SearchParseException[[test][2]: from[-1],size[-1]: Parse Failure [Failed to parse source [{\\\"query\\\" : {\\\"query_string\\\":{\\\"query\\\": \\\"\\\"}}}]]]; nested: QueryParsingException[[test] Failed to parse query []]; nested: ParseException[Cannot parse '': Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; nested: ParseException[Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; }{[MkT5D2yYRgq6g73gx9141A][test][0]: 
SearchParseException[[test][0]: from[-1],size[-1]: Parse Failure [Failed to parse source [{\\\"query\\\" : {\\\"query_string\\\":{\\\"query\\\": \\\"\\\"}}}]]]; nested: QueryParsingException[[test] Failed to parse query []]; nested: ParseException[Cannot parse '': Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; nested: ParseException[Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; }{[MkT5D2yYRgq6g73gx9141A][test][1]: SearchParseException[[test][1]: from[-1],size[-1]: Parse Failure [Failed to parse source [{\\\"query\\\" : {\\\"query_string\\\":{\\\"query\\\": \\\"\\\"}}}]]]; nested: QueryParsingException[[test] Failed to parse query []]; nested: ParseException[Cannot parse '': Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; nested: ParseException[Encountered \\\"<EOF>\\\" at line 1, column 0.\\nWas expecting one of:\\n <NOT> ...\\n \\\"+\\\" ...\\n \\\"-\\\" ...\\n <BAREOPER> ...\\n \\\"(\\\" ...\\n \\\"_\\\" ...\\n <QUOTED> ...\\n <TERM> ...\\n <PREFIXTERM> ...\\n <WILDTERM> ...\\n <REGEXPTERM> ...\\n \\\"[\\\" ...\\n \\\"{\\\" ...\\n <NUMBER> ...\\n <TERM> ...\\n \\\"_\\\" ...\\n ]; }]\",\"status\":400}\n\nIt will generate the following exception on the server:\n[2013-10-23 11:31:10,295][DEBUG][action.search.type ] [Tomazooma] [test][3], node[MkT5D2yYRgq6g73gx9141A], [P], s[STARTED]: Failed to execute [org.elasticsearch.action.search.SearchRequest@acda796]\norg.elasticsearch.search.SearchParseException: [test][3]: from[-1],size[-1]: Parse Failure [Failed to parse source [{\"query\" : {\"query_string\":{\"query\": \"\"}}}]]\n at org.elasticsearch.search.SearchService.parseSource(SearchService.java:561)\n at org.elasticsearch.search.SearchService.createContext(SearchService.java:464)\n at org.elasticsearch.search.SearchService.createContext(SearchService.java:449)\n at org.elasticsearch.search.SearchService.createAndPutContext(SearchService.java:442)\n at org.elasticsearch.search.SearchService.executeQueryPhase(SearchService.java:214)\n at org.elasticsearch.search.action.SearchServiceTransportAction.sendExecuteQuery(SearchServiceTransportAction.java:202)\n at org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction$AsyncAction.sendExecuteFirstPhase(TransportSearchQueryThenFetchAction.java:80)\n at org.elasticsearch.action.search.type.TransportSearchTypeAction$BaseAsyncAction.performFirstPhase(TransportSearchTypeAction.java:216)\n at org.elasticsearch.action.search.type.TransportSearchTypeAction$BaseAsyncAction.performFirstPhase(TransportSearchTypeAction.java:203)\n at org.elasticsearch.action.search.type.TransportSearchTypeAction$BaseAsyncAction$2.run(TransportSearchTypeAction.java:186)\n at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1146)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:679)\nCaused by: org.elasticsearch.index.query.QueryParsingException: [test] Failed to parse query []\n at org.elasticsearch.index.query.QueryStringQueryParser.parse(QueryStringQueryParser.java:231)\n at org.elasticsearch.index.query.QueryParseContext.parseInnerQuery(QueryParseContext.java:207)\n at org.elasticsearch.index.query.IndexQueryParserService.parse(IndexQueryParserService.java:281)\n at org.elasticsearch.index.query.IndexQueryParserService.parse(IndexQueryParserService.java:255)\n at org.elasticsearch.search.query.QueryParseElement.parse(QueryParseElement.java:33)\n at org.elasticsearch.search.SearchService.parseSource(SearchService.java:549)\n ... 12 more\nCaused by: org.apache.lucene.queryparser.classic.ParseException: Cannot parse '': Encountered \"<EOF>\" at line 1, column 0.\nWas expecting one of:\n <NOT> ...\n \"+\" ...\n \"-\" ...\n <BAREOPER> ...\n \"(\" ...\n \"*\" ...\n <QUOTED> ...\n <TERM> ...\n <PREFIXTERM> ...\n", "comments": [ { "body": "Not only empty queries; a query containing only whitespace will fail too.\n\nIt's pretty annoying from a logical POV -- empty strings are nothing special, really. Littering the application code with checks for query emptiness/whitespace-only-ness is very ugly and unnatural. \n\nShould be done on ES side (no query restriction = match all). \"400 Bad Request\" is not an appropriate response.\n", "created_at": "2014-01-19T11:07:14Z" } ], "number": 3952, "title": "Empty query_string generates SearchParseException" }
{ "body": "Closes #3952\n", "number": 4807, "review_comments": [], "title": "Return `MatchNoDocsQuery` if query string is emtpy" }
{ "commits": [ { "message": "Return `MatchNoDocsQuery` if query string is emtpy\n\nCloses #3952" } ], "files": [ { "diff": "@@ -287,3 +287,9 @@ query as a search for `\"wi OR fi\"`, while the token stored in your\n index is actually `\"wifi\"`. Escaping the space will protect it from\n being touched by the query string parser: `\"wi\\ fi\"`.\n ****\n+\n+===== Empty Query\n+\n+If the query string is empty or only contains whitespaces the\n+query string is interpreted as a `no_docs_query` and will yield\n+an empty result set. ", "filename": "docs/reference/query-dsl/queries/query-string-syntax.asciidoc", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@\n import org.apache.lucene.search.*;\n import org.apache.lucene.util.automaton.RegExp;\n import org.elasticsearch.common.lucene.Lucene;\n+import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;\n import org.elasticsearch.common.lucene.search.Queries;\n import org.elasticsearch.common.lucene.search.XFilteredQuery;\n import org.elasticsearch.common.unit.Fuzziness;\n@@ -870,4 +871,14 @@ private Collection<String> extractMultiFields(String field) {\n }\n return fields;\n }\n+\n+ public Query parse(String query) throws ParseException {\n+ if (query.trim().isEmpty()) {\n+ // if the query string is empty we return no docs / empty result\n+ // the behavior is simple to change in the client if all docs is required\n+ // or a default query\n+ return new MatchNoDocsQuery();\n+ }\n+ return super.parse(query);\n+ }\n }", "filename": "src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java", "status": "modified" }, { "diff": "@@ -90,7 +90,15 @@ public void testOmitNormsOnAll() throws ExecutionException, InterruptedException\n assertThat(hits[0].score(), allOf(greaterThan(hits[1].getScore()), greaterThan(hits[2].getScore())));\n \n }\n-\n+ @Test // see #3952\n+ public void testEmptyQueryString() throws ExecutionException, InterruptedException, IOException {\n+ createIndex(\"test\");\n+ indexRandom(true, client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"field1\", \"the quick brown fox jumps\"),\n+ client().prepareIndex(\"test\", \"type1\", \"2\").setSource(\"field1\", \"quick brown\"),\n+ client().prepareIndex(\"test\", \"type1\", \"3\").setSource(\"field1\", \"quick\"));\n+ assertHitCount(client().prepareSearch().setQuery(queryString(\"quick\")).get(), 3l);\n+ assertHitCount(client().prepareSearch().setQuery(queryString(\"\")).get(), 0l); // return no docs\n+ }\n \n @Test // see https://github.com/elasticsearch/elasticsearch/issues/3177\n public void testIssue3177() {", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
{ "body": "In 0.90.10 on indexing an empty json document `{}` into an index with `_all` enabled and a default analyzer of type `keyword` and IndexOutOfBoundsException is thrown.\n\nThis Bug was introduced with commit 0ef6ed98945d9698f0703283086883554a28dfb7\n\ncurl-script to reproduce this issue:\n\n``` bash\n# delete index\ncurl -XDELETE http://localhost:9200/all_field_bug\n\n# create index with default analyzer for every field\n# without analyzer the index query below works fine\ncurl -XPUT http://localhost:9200/all_field_bug -d '{\"index.analysis.analyzer.default.type\":\"keyword\"}'\n\n# create some fields, type doesn't matter here\n# index query below works fine with \"_all\": {\"enabled\": false}\ncurl -XPOST http://localhost:9200/all_field_bug/default -d '{\"properties\":{\"date\":{\"type\":\"date\"}}}'\n\n# should index this empty json document\n# but returns {\"error\":\"IndexOutOfBoundsException[Index: 0, Size: 0]\",\"status\":500}\ncurl -XPUT http://localhost:9200/all_field_bug/default/1 -d '{}'\n```\n\nMore detailed stacktrace:\n\n``` java\nCaused by: java.lang.IndexOutOfBoundsException: Index: 0, Size: 0\n at java.util.ArrayList.rangeCheck(ArrayList.java:635)\n at java.util.ArrayList.get(ArrayList.java:411)\n at org.elasticsearch.common.lucene.all.AllEntries.boost(AllEntries.java:159)\n at org.elasticsearch.common.lucene.all.AllTokenStream.incrementToken(AllTokenStream.java:65)\n at org.apache.lucene.index.DocInverterPerField.processFields(DocInverterPerField.java:102)\n at org.apache.lucene.index.DocFieldProcessor.processDocument(DocFieldProcessor.java:248)\n at org.apache.lucene.index.DocumentsWriterPerThread.updateDocument(DocumentsWriterPerThread.java:253)\n at org.apache.lucene.index.DocumentsWriter.updateDocument(DocumentsWriter.java:453)\n at org.apache.lucene.index.IndexWriter.updateDocument(IndexWriter.java:1520)\n at org.apache.lucene.index.IndexWriter.addDocument(IndexWriter.java:1190)\n at org.elasticsearch.index.engine.robin.RobinEngine.innerIndex(RobinEngine.java:581)\n at org.elasticsearch.index.engine.robin.RobinEngine.index(RobinEngine.java:492)\n at org.elasticsearch.index.shard.service.InternalIndexShard.index(InternalIndexShard.java:386)\n at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:212)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$AsyncShardOperationAction.performOnPrimary(TransportShardReplicationOperationAction.java:556)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$AsyncShardOperationAction$1.run(TransportShardReplicationOperationAction.java:426)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n ... 1 more\n```\n\nWill try to come up with a fix, if time is on my side.\n", "comments": [ { "body": "Good catch!\n\n> Will try to come up with a fix, if time is on my side.\n\nCool, don't hesitate to ask for help if you need some!\n", "created_at": "2014-01-17T13:04:39Z" }, { "body": "@mfelsche Do you plan to work on this soon? I'm asking because it would be nice to have a fix for this issue in 1.0 GA.\n", "created_at": "2014-01-17T16:16:28Z" }, { "body": "@jpountz I'm about to create a pull request. against which branch do you prefer it? 
v0.90.10 or master or something else?\n", "created_at": "2014-01-20T08:48:30Z" }, { "body": "@mfelsche Against master would be perfect!\n", "created_at": "2014-01-20T09:17:04Z" }, { "body": "fixed on 0.90 via bd91a67\nand master via c42f770\n", "created_at": "2014-01-20T10:48:22Z" } ], "number": 4771, "title": "IndexOutOfBoundsException on indexing empty JSON document in 0.90.10" }
{ "body": "should fix #4771 \n", "number": 4803, "review_comments": [ { "body": "can we maybe only enter this logic if the if the list is not empty?\n\n``` JAVA\nif (!entries.isEmpty()) {\n // do the magic\n}\nreturn 1.0f;\n```\n", "created_at": "2014-01-20T09:38:55Z" } ], "title": "avoid IndexOutOfBoundsException on all field with no tokens and keywordanalyzer" }
{ "commits": [ { "message": "fix IndexOutOfBoundsException on _all field without tokens and keyword analyzer\n\nfix AllEntries to only compute boost when there are actually some Entries available for the _all field\n\nCloses #4771" } ], "files": [ { "diff": "@@ -145,20 +145,23 @@ public Set<String> fields() {\n \n // compute the boost for a token with the given startOffset\n public float boost(int startOffset) {\n- int lo = 0, hi = entries.size() - 1;\n- while (lo <= hi) {\n- final int mid = (lo + hi) >>> 1;\n- final int midOffset = entries.get(mid).startOffset();\n- if (startOffset < midOffset) {\n- hi = mid - 1;\n- } else {\n- lo = mid + 1;\n+ if (!entries.isEmpty()) {\n+ int lo = 0, hi = entries.size() - 1;\n+ while (lo <= hi) {\n+ final int mid = (lo + hi) >>> 1;\n+ final int midOffset = entries.get(mid).startOffset();\n+ if (startOffset < midOffset) {\n+ hi = mid - 1;\n+ } else {\n+ lo = mid + 1;\n+ }\n }\n+ final int index = Math.max(0, hi); // protection against broken token streams\n+ assert entries.get(index).startOffset() <= startOffset;\n+ assert index == entries.size() - 1 || entries.get(index + 1).startOffset() > startOffset;\n+ return entries.get(index).boost();\n }\n- final int index = Math.max(0, hi); // protection against broken token streams\n- assert entries.get(index).startOffset() <= startOffset;\n- assert index == entries.size() - 1 || entries.get(index + 1).startOffset() > startOffset;\n- return entries.get(index).boost();\n+ return 1.0f;\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java", "status": "modified" }, { "diff": "@@ -318,4 +318,25 @@ public void testMultipleTokensAllWithBoost() throws Exception {\n \n indexWriter.close();\n }\n+\n+ @Test\n+ public void testNoTokensWithKeywordAnalyzer() throws Exception {\n+ Directory dir = new RAMDirectory();\n+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.KEYWORD_ANALYZER));\n+\n+ Document doc = new Document();\n+ doc.add(new Field(\"_id\", \"1\", StoredField.TYPE));\n+ AllEntries allEntries = new AllEntries();\n+ allEntries.reset();\n+ doc.add(new TextField(\"_all\", AllTokenStream.allTokenStream(\"_all\", allEntries, Lucene.KEYWORD_ANALYZER)));\n+\n+ indexWriter.addDocument(doc);\n+\n+ IndexReader reader = DirectoryReader.open(indexWriter, true);\n+ IndexSearcher searcher = new IndexSearcher(reader);\n+\n+ TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);\n+ assertThat(docs.totalHits, equalTo(1));\n+ assertThat(docs.scoreDocs[0].doc, equalTo(0));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java", "status": "modified" }, { "diff": "@@ -2010,4 +2010,13 @@ public void testSimpleQueryStringFlags() {\n assertHitCount(searchResponse, 1l);\n assertFirstHit(searchResponse, hasId(\"4\"));\n }\n+\n+ @Test\n+ public void testSearchEmptyDoc() {\n+ prepareCreate(\"test\").setSettings(\"{\\\"index.analysis.analyzer.default.type\\\":\\\"keyword\\\"}\").get();\n+ client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"{}\").get();\n+ refresh();\n+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);\n+ }\n+\n }", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
{ "body": "Since windows uses different line endings `\\r\\n` vs *nixes `\\n`, the tests fails as an extra char `\\r` is returned on windows.\n", "comments": [ { "body": "@costin can we make this fix depending on a constant. ie only replace if we are only windows?\n", "created_at": "2014-01-17T16:18:34Z" }, { "body": "@s1monw done.\n", "created_at": "2014-01-17T16:20:40Z" }, { "body": "LGTM\n", "created_at": "2014-01-17T17:05:15Z" }, { "body": "@costin +1 to push\n", "created_at": "2014-01-17T20:18:11Z" }, { "body": "Done.\n", "created_at": "2014-01-17T21:00:58Z" } ], "number": 4785, "title": "BulkRequestTests fails on Windows due to line ending differences" }
{ "body": "fix for #4785\n", "number": 4786, "review_comments": [], "title": "fix for #4785" }
{ "commits": [ { "message": "fix for #4785" }, { "message": "apply line ending fix only on Windows\nrelates to #4785" } ], "files": [ { "diff": "@@ -20,9 +20,11 @@\n package org.elasticsearch.action.bulk;\n \n import com.google.common.base.Charsets;\n+import org.apache.lucene.util.Constants;\n import org.elasticsearch.action.delete.DeleteRequest;\n import org.elasticsearch.action.index.IndexRequest;\n import org.elasticsearch.action.update.UpdateRequest;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.test.ElasticsearchTestCase;\n import org.junit.Test;\n@@ -36,6 +38,10 @@ public class BulkRequestTests extends ElasticsearchTestCase {\n @Test\n public void testSimpleBulk1() throws Exception {\n String bulkAction = copyToStringFromClasspath(\"/org/elasticsearch/action/bulk/simple-bulk.json\");\n+ // translate Windows line endings (\\r\\n) to standard ones (\\n)\n+ if (Constants.WINDOWS) {\n+ bulkAction = Strings.replace(bulkAction, \"\\r\\n\", \"\\n\");\n+ }\n BulkRequest bulkRequest = new BulkRequest();\n bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), true, null, null);\n assertThat(bulkRequest.numberOfActions(), equalTo(3));", "filename": "src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java", "status": "modified" } ] }
{ "body": "As I understand it, the intention of the bulk API is that individual operations may fail, but a failure in an individual operation should generally not cause the failure of all operations in the request.\n\nFirst let's verify that this is generally how it works. I am testing here with a simple case of malformed JSON (though I originally saw the problem with a subtler JSON-parsing issue of unexpected non-printable ASCII characters in JSON data).\n\n```\n$ curl -XPUT http://localhost:9200/testing/\n{\"ok\":true,\"acknowledged\":true}\n\n$ curl -XPUT http://localhost:9200/testing/person/_mapping -d '{\"person\": {\"dynamic\": \"strict\", \"properties\": {\"last_modified\": {\"type\": \"date\", \"format\": \"dateOptionalTime\"},\"name\": {\"type\": \"string\"}}}}'\n{\"ok\":true,\"acknowledged\":true}\n\n$ cat baddata.txt\n{\"index\": {\"_id\": \"1\"}}\n{\"name\": \"Malformed}\n{\"index\": {\"_id\": \"2\"}}\n{\"name\": \"Good\"}\n\n$ curl -XPOST http://localhost:9200/testing/person/_bulk --data-binary @baddata.txt\n{\"took\":72,\"items\":[{\"index\":{\"_index\":\"testing\",\"_type\":\"person\",\"_id\":\"1\",\"error\":\"MapperParsingException[failed to parse [name]]; nested: JsonParseException[Unexpected end-of-input in VALUE_STRING\\n at [Source: [B@27beb7ec; line: 1, column: 65]]; \"}},{\"index\":{\"_index\":\"testing\",\"_type\":\"person\",\"_id\":\"2\",\"_version\":1,\"ok\":true}}]}\n```\n\nThis worked correctly - one item failed with an error, the other succeeded, and we do indeed find one item indexed in a subsequent search.\n\nNow let's re-create that index and enable the magic \"_timestamp\" field this time:\n\n```\n$ curl -XDELETE http://localhost:9200/testing/\n{\"ok\":true,\"acknowledged\":true}\n\n$ curl -XPUT http://localhost:9200/testing/\n{\"ok\":true,\"acknowledged\":true}\n\n$ curl -XPUT $LOCAL/testing/person/_mapping -d '{\"person\": {\"_timestamp\": {\"enabled\": true, \"path\": \"last_modified\"}, \"dynamic\": \"strict\", \"properties\": {\"last_modified\": {\"type\": \"date\", \"format\": \"dateOptionalTime\"},\"name\": {\"type\": \"string\"}}}}'\n{\"ok\":true,\"acknowledged\":true}\n\n$ curl -XPOST $LOCAL/testing/person/_bulk --data-binary @baddata.txt\n{\"error\":\"ElasticSearchParseException[failed to parse doc to extract routing/timestamp]; nested: JsonParseException[Unexpected end-of-input in VALUE_STRING\\n at [Source: [B@68f55ff2; line: 1, column: 65]]; \",\"status\":400}\n```\n\nThis time the entire request errors out and returns a 400 response code, and no items are successfully indexed.\n\nSince the malformed JSON is limited to a single action in the bulk request, I would expect only that action to fail, regardless of whether the \"_timestamp\" magic field is enabled or not.\n\nTested against latest ElasticSearch release:\n\n```\n$ curl http://localhost:9200\n{\n \"ok\" : true,\n \"status\" : 200,\n \"name\" : \"Pip the Troll\",\n \"version\" : {\n \"number\" : \"0.90.10\",\n \"build_hash\" : \"0a5781f44876e8d1c30b6360628d59cb2a7a2bbb\",\n \"build_timestamp\" : \"2014-01-10T10:18:37Z\",\n \"build_snapshot\" : false,\n \"lucene_version\" : \"4.6\"\n },\n \"tagline\" : \"You Know, for Search\"\n}\n```\n", "comments": [], "number": 4745, "title": "Enabling \"_timestamp\" can cause bulk API to fail entire request instead of single operation" }
{ "body": "If a preparsing of the source is needed (due to mapping configuration,\nwhich extracts the routing/id value from the source) and the source is not\nvalid JSON, then the whole bulk request is failed instead of a single\nBulkRequest.\n\nThis commit ensures, that a broken JSON request is not forwarded to the\ndestination shard and creates an appropriate BulkItemResponse, which\nincludes a failure.\n\nThis also implied changing the BulkItemResponse serialization, because one\ncannot be sure anymore, if a response includes an ID, in case it was not\nspecified and could not be extracted from the JSON.\n\nCloses #4745\n", "number": 4781, "review_comments": [ { "body": "well I guess that was a problem before so we don't need conditional reads / writes here depending on the version? I wonder when this can be `null`?\n", "created_at": "2014-01-20T10:08:50Z" }, { "body": "This can be null, if you try to extract the ID from a broken JSON and thus fail.\n\nThis hasnt popped up before, because the whole request failed and so there was no need to send this single BulkItemResponse over the wire.\n", "created_at": "2014-01-20T10:11:39Z" } ], "title": "Failed preparsing does not fail whole bulk request" }
{ "commits": [ { "message": "Bulk: Failed preparsing does not fail whole bulk request\n\nIf a preparsing of the source is needed (due to mapping configuration,\nwhich extracts the routing/id value from the source) and the source is not\nvalid JSON, then the whole bulk request is failed instead of a single\nBulkRequest.\n\nThis commit ensures, that a broken JSON request is not forwarded to the\ndestination shard and creates an appropriate BulkItemResponse, which\nincludes a failure.\n\nThis also implied changing the BulkItemResponse serialization, because one\ncannot be sure anymore, if a response includes an ID, in case it was not\nspecified and could not be extracted from the JSON.\n\nCloses #4745" } ], "files": [ { "diff": "@@ -264,7 +264,7 @@ public void readFrom(StreamInput in) throws IOException {\n if (in.readBoolean()) {\n String fIndex = in.readSharedString();\n String fType = in.readSharedString();\n- String fId = in.readString();\n+ String fId = in.readOptionalString();\n String fMessage = in.readString();\n RestStatus status = RestStatus.readFrom(in);\n failure = new Failure(fIndex, fType, fId, fMessage, status);\n@@ -294,7 +294,7 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeBoolean(true);\n out.writeSharedString(failure.getIndex());\n out.writeSharedString(failure.getType());\n- out.writeString(failure.getId());\n+ out.writeOptionalString(failure.getId());\n out.writeString(failure.getMessage());\n RestStatus.writeTo(out, failure.getStatus());\n }", "filename": "src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import com.google.common.collect.Lists;\n import com.google.common.collect.Maps;\n import com.google.common.collect.Sets;\n+import org.elasticsearch.ElasticsearchParseException;\n import org.elasticsearch.ExceptionsHelper;\n import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.ActionRequest;\n@@ -51,10 +52,7 @@\n import org.elasticsearch.transport.TransportChannel;\n import org.elasticsearch.transport.TransportService;\n \n-import java.util.List;\n-import java.util.Locale;\n-import java.util.Map;\n-import java.util.Set;\n+import java.util.*;\n import java.util.concurrent.atomic.AtomicBoolean;\n import java.util.concurrent.atomic.AtomicInteger;\n \n@@ -153,7 +151,10 @@ private void executeBulk(final BulkRequest bulkRequest, final long startTime, fi\n clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);\n \n MetaData metaData = clusterState.metaData();\n- for (ActionRequest request : bulkRequest.requests) {\n+ final AtomicArray<BulkItemResponse> responses = new AtomicArray<BulkItemResponse>(bulkRequest.requests.size());\n+\n+ for (int i = 0; i < bulkRequest.requests.size(); i++) {\n+ ActionRequest request = bulkRequest.requests.get(i);\n if (request instanceof IndexRequest) {\n IndexRequest indexRequest = (IndexRequest) request;\n String aliasOrIndex = indexRequest.index();\n@@ -163,7 +164,15 @@ private void executeBulk(final BulkRequest bulkRequest, final long startTime, fi\n if (metaData.hasIndex(indexRequest.index())) {\n mappingMd = metaData.index(indexRequest.index()).mappingOrDefault(indexRequest.type());\n }\n- indexRequest.process(metaData, aliasOrIndex, mappingMd, allowIdGeneration);\n+ try {\n+ indexRequest.process(metaData, aliasOrIndex, mappingMd, allowIdGeneration);\n+ } catch (ElasticsearchParseException e) {\n+ BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), 
indexRequest.type(), indexRequest.id(), e);\n+ BulkItemResponse bulkItemResponse = new BulkItemResponse(i, \"index\", failure);\n+ responses.set(i, bulkItemResponse);\n+ // make sure the request gets never processed again\n+ bulkRequest.requests.set(i, null);\n+ }\n } else if (request instanceof DeleteRequest) {\n DeleteRequest deleteRequest = (DeleteRequest) request;\n deleteRequest.routing(clusterState.metaData().resolveIndexRouting(deleteRequest.routing(), deleteRequest.index()));\n@@ -174,8 +183,6 @@ private void executeBulk(final BulkRequest bulkRequest, final long startTime, fi\n updateRequest.index(clusterState.metaData().concreteIndex(updateRequest.index()));\n }\n }\n- final AtomicArray<BulkItemResponse> responses = new AtomicArray<BulkItemResponse>(bulkRequest.requests.size());\n-\n \n // first, go over all the requests and create a ShardId -> Operations mapping\n Map<ShardId, List<BulkItemRequest>> requestsByShard = Maps.newHashMap();", "filename": "src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java", "status": "modified" }, { "diff": "@@ -566,7 +566,7 @@ public void process(MetaData metaData, String aliasOrIndex, @Nullable MappingMet\n timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp, mappingMd.timestamp().dateTimeFormatter());\n }\n } catch (Exception e) {\n- throw new ElasticsearchParseException(\"failed to parse doc to extract routing/timestamp\", e);\n+ throw new ElasticsearchParseException(\"failed to parse doc to extract routing/timestamp/id\", e);\n } finally {\n if (parser != null) {\n parser.close();", "filename": "src/main/java/org/elasticsearch/action/index/IndexRequest.java", "status": "modified" }, { "diff": "@@ -19,6 +19,8 @@\n \n package org.elasticsearch.document;\n \n+import com.google.common.base.Charsets;\n+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;\n import org.elasticsearch.action.bulk.BulkRequestBuilder;\n import org.elasticsearch.action.bulk.BulkResponse;\n import org.elasticsearch.action.count.CountResponse;\n@@ -30,6 +32,7 @@\n import org.elasticsearch.common.Priority;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.VersionType;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.test.ElasticsearchIntegrationTest;\n@@ -507,4 +510,84 @@ public void run() {\n assertThat(successes, equalTo(1));\n }\n \n+ @Test // issue 4745\n+ public void preParsingSourceDueToMappingShouldNotBreakCompleteBulkRequest() throws Exception {\n+ XContentBuilder builder = jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"_timestamp\")\n+ .field(\"enabled\", true)\n+ .field(\"path\", \"last_modified\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+ CreateIndexResponse createIndexResponse = prepareCreate(\"test\").addMapping(\"type\", builder).get();\n+ assertAcked(createIndexResponse);\n+\n+ String brokenBuildRequestData = \"{\\\"index\\\": {\\\"_id\\\": \\\"1\\\"}}\\n\" +\n+ \"{\\\"name\\\": \\\"Malformed}\\n\" +\n+ \"{\\\"index\\\": {\\\"_id\\\": \\\"2\\\"}}\\n\" +\n+ \"{\\\"name\\\": \\\"Good\\\", \\\"last_modified\\\" : \\\"2013-04-05\\\"}\\n\";\n+\n+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), false, \"test\", \"type\").setRefresh(true).get();\n+ assertThat(bulkResponse.getItems().length, is(2));\n+ 
assertThat(bulkResponse.getItems()[0].isFailed(), is(true));\n+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));\n+\n+ assertExists(get(\"test\", \"type\", \"2\"));\n+ }\n+\n+ @Test // issue 4745\n+ public void preParsingSourceDueToRoutingShouldNotBreakCompleteBulkRequest() throws Exception {\n+ XContentBuilder builder = jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"_routing\")\n+ .field(\"required\", true)\n+ .field(\"path\", \"my_routing\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+ CreateIndexResponse createIndexResponse = prepareCreate(\"test\").addMapping(\"type\", builder).get();\n+ assertAcked(createIndexResponse);\n+ ensureYellow(\"test\");\n+\n+ String brokenBuildRequestData = \"{\\\"index\\\": {} }\\n\" +\n+ \"{\\\"name\\\": \\\"Malformed}\\n\" +\n+ \"{\\\"index\\\": { \\\"_id\\\" : \\\"24000\\\" } }\\n\" +\n+ \"{\\\"name\\\": \\\"Good\\\", \\\"my_routing\\\" : \\\"48000\\\"}\\n\";\n+\n+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), false, \"test\", \"type\").setRefresh(true).get();\n+ assertThat(bulkResponse.getItems().length, is(2));\n+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));\n+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));\n+\n+ assertExists(client().prepareGet(\"test\", \"type\", \"24000\").setRouting(\"48000\").get());\n+ }\n+\n+\n+ @Test // issue 4745\n+ public void preParsingSourceDueToIdShouldNotBreakCompleteBulkRequest() throws Exception {\n+ XContentBuilder builder = jsonBuilder().startObject()\n+ .startObject(\"type\")\n+ .startObject(\"_id\")\n+ .field(\"path\", \"my_id\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+ CreateIndexResponse createIndexResponse = prepareCreate(\"test\").addMapping(\"type\", builder).get();\n+ assertAcked(createIndexResponse);\n+ ensureYellow(\"test\");\n+\n+ String brokenBuildRequestData = \"{\\\"index\\\": {} }\\n\" +\n+ \"{\\\"name\\\": \\\"Malformed}\\n\" +\n+ \"{\\\"index\\\": {} }\\n\" +\n+ \"{\\\"name\\\": \\\"Good\\\", \\\"my_id\\\" : \\\"48\\\"}\\n\";\n+\n+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), false, \"test\", \"type\").setRefresh(true).get();\n+ assertThat(bulkResponse.getItems().length, is(2));\n+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));\n+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));\n+\n+ assertExists(get(\"test\", \"type\", \"48\"));\n+ }\n+\n }", "filename": "src/test/java/org/elasticsearch/document/BulkTests.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;\n import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;\n import org.elasticsearch.action.count.CountResponse;\n+import org.elasticsearch.action.get.GetResponse;\n import org.elasticsearch.action.percolate.PercolateResponse;\n import org.elasticsearch.action.search.SearchRequestBuilder;\n import org.elasticsearch.action.search.SearchResponse;\n@@ -52,6 +53,7 @@\n import java.lang.reflect.InvocationTargetException;\n import java.util.Arrays;\n import java.util.HashSet;\n+import java.util.Locale;\n import java.util.Set;\n \n import static org.hamcrest.MatcherAssert.assertThat;\n@@ -149,6 +151,11 @@ public static void assertMatchCount(PercolateResponse percolateResponse, long ex\n assertVersionSerializable(percolateResponse);\n }\n \n+ public 
static void assertExists(GetResponse response) {\n+ String message = String.format(Locale.ROOT, \"Expected %s/%s/%s to exist, but does not\", response.getIndex(), response.getType(), response.getId());\n+ assertThat(message, response.isExists(), is(true));\n+ }\n+\n public static void assertFirstHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {\n assertSearchHit(searchResponse, 1, matcher);\n }", "filename": "src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java", "status": "modified" } ] }
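From a client's point of view the change means a malformed source only fails its own item. A hedged sketch of how a caller might inspect per-item failures (accessor names taken from the test code above; the logging itself is illustrative):

```java
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkResponse;

public class BulkFailureHandlingSketch {
    // Sketch only: report each failed item instead of treating the whole
    // bulk request as failed.
    static void logFailures(BulkResponse response) {
        for (BulkItemResponse item : response.getItems()) {
            if (item.isFailed()) {
                System.out.println("item " + item.getItemId() + " failed: "
                        + item.getFailureMessage());
            }
        }
    }
}
```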
{ "body": "I'm not really sure what caused this but I'm pretty sure I didn't get hot threads from the node it mentions:\n\n```\n[2014-01-17 01:36:11,648][DEBUG][action.admin.cluster.node.hotthreads] [elastic1008] failed to execute on node [Orfebp5QSN2iIag5IKTrXg]\norg.elasticsearch.transport.RemoteTransportException: [elastic1001][inet[/10.64.0.108:9300]][cluster/nodes/hot_threads/n]\nCaused by: org.elasticsearch.ElasticSearchException: failed to detect hot threads\n at org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction.nodeOperation(TransportNodesHotThreadsAction.java:103)\n at org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction.nodeOperation(TransportNodesHotThreadsAction.java:43)\n at org.elasticsearch.action.support.nodes.TransportNodesOperationAction$NodeTransportHandler.messageReceived(TransportNodesOperationAction.java:281)\n at org.elasticsearch.action.support.nodes.TransportNodesOperationAction$NodeTransportHandler.messageReceived(TransportNodesOperationAction.java:272)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.run(MessageChannelHandler.java:270)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:724)\nCaused by: java.lang.NullPointerException\n at org.elasticsearch.monitor.jvm.HotThreads.similarity(HotThreads.java:216)\n at org.elasticsearch.monitor.jvm.HotThreads.innerDetect(HotThreads.java:177)\n at org.elasticsearch.monitor.jvm.HotThreads.detect(HotThreads.java:75)\n at org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction.nodeOperation(TransportNodesHotThreadsAction.java:101)\n ... 7 more\n```\n", "comments": [ { "body": "Thanks!\n", "created_at": "2014-01-17T16:33:23Z" }, { "body": "thank you!\n", "created_at": "2014-01-17T16:38:03Z" } ], "number": 4775, "title": "Failed to detect hot threads" }
{ "body": "If a thread is not alive getting ThreadMXBean#getThreadInfo(long[], int)\nplaces null elemnents in the returned array which are not repected\nin the HotTheards API.\n\nCloses #4775\n", "number": 4777, "review_comments": [], "title": "Check ThreadInfo[] for null element if thread are not alive." }
{ "commits": [ { "message": "Check ThreadInfo[] for null element if thread are not alive.\n\nIf a thread is not alive getting ThreadMXBean#getThreadInfo(long[], int)\nplaces null elemnents in the returned array which are not repected\nin the HotTheards API.\n\nCloses #4775" } ], "files": [ { "diff": "@@ -151,6 +151,9 @@ public int compare(MyThreadInfo o1, MyThreadInfo o2) {\n }\n ThreadInfo[][] allInfos = new ThreadInfo[threadElementsSnapshotCount][];\n for (int j = 0; j < threadElementsSnapshotCount; j++) {\n+ // NOTE, javadoc of getThreadInfo says: If a thread of the given ID is not alive or does not exist,\n+ // null will be set in the corresponding element in the returned array. A thread is alive if it has\n+ // been started and has not yet died.\n allInfos[j] = threadBean.getThreadInfo(ids, Integer.MAX_VALUE);\n Thread.sleep(threadElementsSnapshotDelay.millis());\n }\n@@ -163,8 +166,22 @@ public int compare(MyThreadInfo o1, MyThreadInfo o2) {\n } else if (\"block\".equals(type)) {\n time = hotties.get(t).blockedTime;\n }\n+ String threadName = null;\n+ if (allInfos[0][t] == null) {\n+ for (ThreadInfo[] info : allInfos) {\n+ if (info != null && info[t] != null) {\n+ threadName = info[t].getThreadName();\n+ break;\n+ }\n+ }\n+ if (threadName == null) {\n+ continue; // thread is not alive yet or died before the first snapshot - ignore it!\n+ }\n+ } else {\n+ threadName = allInfos[0][t].getThreadName();\n+ }\n double percent = (((double) time) / interval.nanos()) * 100;\n- sb.append(String.format(Locale.ROOT, \"%n%4.1f%% (%s out of %s) %s usage by thread '%s'%n\", percent, TimeValue.timeValueNanos(time), interval, type, allInfos[0][t].getThreadName()));\n+ sb.append(String.format(Locale.ROOT, \"%n%4.1f%% (%s out of %s) %s usage by thread '%s'%n\", percent, TimeValue.timeValueNanos(time), interval, type, threadName));\n // for each snapshot (2nd array index) find later snapshot for same thread with max number of\n // identical StackTraceElements (starting from end of each)\n boolean[] done = new boolean[threadElementsSnapshotCount];\n@@ -189,16 +206,18 @@ public int compare(MyThreadInfo o1, MyThreadInfo o2) {\n count++;\n }\n }\n- StackTraceElement[] show = allInfos[i][t].getStackTrace();\n- if (count == 1) {\n- sb.append(String.format(Locale.ROOT, \" unique snapshot%n\"));\n- for (int l = 0; l < show.length; l++) {\n- sb.append(String.format(Locale.ROOT, \" %s%n\", show[l]));\n- }\n- } else {\n- sb.append(String.format(Locale.ROOT, \" %d/%d snapshots sharing following %d elements%n\", count, threadElementsSnapshotCount, maxSim));\n- for (int l = show.length - maxSim; l < show.length; l++) {\n- sb.append(String.format(Locale.ROOT, \" %s%n\", show[l]));\n+ if (allInfos[i][t] != null) {\n+ final StackTraceElement[] show = allInfos[i][t].getStackTrace();\n+ if (count == 1) {\n+ sb.append(String.format(Locale.ROOT, \" unique snapshot%n\"));\n+ for (int l = 0; l < show.length; l++) {\n+ sb.append(String.format(Locale.ROOT, \" %s%n\", show[l]));\n+ }\n+ } else {\n+ sb.append(String.format(Locale.ROOT, \" %d/%d snapshots sharing following %d elements%n\", count, threadElementsSnapshotCount, maxSim));\n+ for (int l = show.length - maxSim; l < show.length; l++) {\n+ sb.append(String.format(Locale.ROOT, \" %s%n\", show[l]));\n+ }\n }\n }\n }\n@@ -211,9 +230,11 @@ public int compare(MyThreadInfo o1, MyThreadInfo o2) {\n }\n }\n \n+ private static final StackTraceElement[] EMPTY = new StackTraceElement[0];\n+\n private int similarity(ThreadInfo threadInfo, ThreadInfo threadInfo0) {\n- 
StackTraceElement[] s1 = threadInfo.getStackTrace();\n- StackTraceElement[] s2 = threadInfo0.getStackTrace();\n+ StackTraceElement[] s1 = threadInfo == null ? EMPTY : threadInfo.getStackTrace();\n+ StackTraceElement[] s2 = threadInfo0 == null ? EMPTY : threadInfo0.getStackTrace();\n int i = s1.length - 1;\n int j = s2.length - 1;\n int rslt = 0;", "filename": "src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java", "status": "modified" }, { "diff": "@@ -0,0 +1,128 @@\n+/*\n+ * Licensed to Elasticsearch under one or more contributor\n+ * license agreements. See the NOTICE file distributed with\n+ * this work for additional information regarding copyright\n+ * ownership. Elasticsearch licenses this file to you under\n+ * the Apache License, Version 2.0 (the \"License\"); you may\n+ * not use this file except in compliance with the License.\n+ * You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.action.admin;\n+\n+import org.elasticsearch.action.ActionListener;\n+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;\n+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder;\n+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;\n+import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.test.ElasticsearchIntegrationTest;\n+import org.junit.Test;\n+\n+import java.util.Map;\n+import java.util.concurrent.CountDownLatch;\n+import java.util.concurrent.ExecutionException;\n+\n+import static org.elasticsearch.index.query.FilterBuilders.andFilter;\n+import static org.elasticsearch.index.query.FilterBuilders.notFilter;\n+import static org.elasticsearch.index.query.FilterBuilders.queryFilter;\n+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;\n+import static org.elasticsearch.index.query.QueryBuilders.termQuery;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n+import static org.hamcrest.CoreMatchers.equalTo;\n+import static org.hamcrest.CoreMatchers.notNullValue;\n+\n+/**\n+ */\n+public class HotThreadsTest extends ElasticsearchIntegrationTest {\n+\n+ @Test\n+ public void testHotThreadsDontFail() throws ExecutionException, InterruptedException {\n+ /**\n+ * This test just checks if nothing crashes or gets stuck etc.\n+ */\n+ createIndex(\"test\");\n+ final int iters = atLeast(2);\n+\n+ for (int i = 0; i < iters; i++) {\n+ final String type;\n+ NodesHotThreadsRequestBuilder nodesHotThreadsRequestBuilder = client().admin().cluster().prepareNodesHotThreads();\n+ if (randomBoolean()) {\n+ TimeValue timeValue = new TimeValue(rarely() ? 
randomIntBetween(500, 5000) : randomIntBetween(20, 500));\n+ nodesHotThreadsRequestBuilder.setInterval(timeValue);\n+ }\n+ if (randomBoolean()) {\n+ nodesHotThreadsRequestBuilder.setThreads(randomIntBetween(1, 100));\n+ }\n+ if (randomBoolean()) {\n+ switch (randomIntBetween(0, 2)) {\n+ case 2:\n+ type = \"cpu\";\n+ break;\n+ case 1:\n+ type = \"wait\";\n+ break;\n+ default:\n+ type = \"block\";\n+ break;\n+ }\n+ assertThat(type, notNullValue());\n+ nodesHotThreadsRequestBuilder.setType(type);\n+ } else {\n+ type = null;\n+ }\n+ final CountDownLatch latch = new CountDownLatch(1);\n+ nodesHotThreadsRequestBuilder.execute(new ActionListener<NodesHotThreadsResponse>() {\n+ @Override\n+ public void onResponse(NodesHotThreadsResponse nodeHotThreads) {\n+ try {\n+ assertThat(nodeHotThreads, notNullValue());\n+ Map<String,NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();\n+ assertThat(nodesMap.size(), equalTo(cluster().size()));\n+ for (NodeHotThreads ht : nodeHotThreads) {\n+ assertNotNull(ht.getHotThreads());\n+ //logger.info(ht.getHotThreads());\n+ }\n+ } finally {\n+ latch.countDown();\n+ }\n+ }\n+\n+ @Override\n+ public void onFailure(Throwable e) {\n+ logger.error(\"FAILED\", e);\n+ latch.countDown();\n+ fail();\n+ }\n+ });\n+\n+ indexRandom(true,\n+ client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"field1\", \"value1\"),\n+ client().prepareIndex(\"test\", \"type1\", \"2\").setSource(\"field1\", \"value2\"),\n+ client().prepareIndex(\"test\", \"type1\", \"3\").setSource(\"field1\", \"value3\"));\n+ ensureSearchable();\n+ if (randomBoolean()) {\n+ optimize();\n+ }\n+ while(latch.getCount() > 0) {\n+ assertHitCount(\n+ client().prepareSearch()\n+ .setQuery(matchAllQuery())\n+ .setPostFilter(\n+ andFilter(\n+ queryFilter(matchAllQuery()),\n+ notFilter(andFilter(queryFilter(termQuery(\"field1\", \"value1\")),\n+ queryFilter(termQuery(\"field1\", \"value2\")))))).get(),\n+ 3l);\n+ }\n+ latch.await();\n+ }\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java", "status": "added" } ] }
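The JDK behaviour the fix guards against can be shown with plain `java.lang.management` calls; this standalone sketch (no Elasticsearch code involved) skips the null entries that `getThreadInfo(long[], int)` returns for threads that are no longer alive:

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class DeadThreadInfoSketch {
    public static void main(String[] args) {
        ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
        long[] ids = threadBean.getAllThreadIds();
        ThreadInfo[] infos = threadBean.getThreadInfo(ids, Integer.MAX_VALUE);
        for (ThreadInfo info : infos) {
            if (info == null) {
                // The thread died between getAllThreadIds() and getThreadInfo();
                // per the javadoc the corresponding element is null.
                continue;
            }
            System.out.println(info.getThreadName());
        }
    }
}
```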
{ "body": "When using scroll with a has_child filter, the initial request returns the correct total number of hits, but subsequent requests return no hits.\n\nIt looks like this problem was introduced in 0.90.6, and still occurs in 0.90.10. 0.90.5 works as expected.\n\nThe number of documents seems to play a part - in my initial test cases with only 2 parent documents, I couldn't reproduce the issue. However, creating 100 parents does reliably reproduce it. In my testing, 8 parent documents worked fine, but 9 did not.\n\nIt sounds very similar to the issue mentioned here: http://elasticsearch-users.115913.n3.nabble.com/No-hit-using-scan-scroll-with-has-parent-filter-td4047236.html\n\nHere's a test script (requires jq(1) to grab the scroll ID from the first JSON result):\n\n``` bash\n#!/bin/sh\n\nHOST='localhost:9200'\nINDEX='test_scroll_jj'\nCURL=\"curl -q --ipv4 --silent --show-error --fail\"\n\n$CURL -XDELETE \"$HOST/${INDEX}?pretty=true\" >/dev/null\n$CURL -XPOST \"$HOST/${INDEX}/?pretty=true\" -d '\n{\n \"mappings\": {\n \"homes\":{\n \"_parent\":{\n \"type\" : \"person\"\n }\n }\n }\n}' >/dev/null\n\nfor x in {1..100}; do # in my testing, 8 docs works, 9 fails\n $CURL -XPUT \"$HOST/${INDEX}/person/$x/?pretty=true\" -d '{}' >/dev/null\n $CURL -XPOST \"$HOST/${INDEX}/homes?parent=$x&pretty=true\" -d '{}' >/dev/null\ndone\n\n$CURL -XPOST \"$HOST/${INDEX}/_refresh?pretty=true\" >/dev/null\n\necho \"REQUEST ONE:\"\nSCROLL_RESULT=$($CURL -v -XPOST \"http://$HOST/${INDEX}/person/_search?pretty=true&scroll=30s\" -d'\n{\n \"size\" : 1,\n \"fields\" : [\"_id\"],\n \"query\" : {\n \"filtered\" : {\n \"filter\" : {\n \"has_child\" : {\n \"type\" : \"homes\",\n \"query\" : {\n \"match_all\" : {}\n }\n }\n }\n }\n }\n}')\necho $SCROLL_RESULT\n\nscroll_id=$(echo $SCROLL_RESULT | jq -r '.[\"_scroll_id\"]')\n\necho\necho \"REQUEST TWO:\"\n$CURL -v \"http://$HOST/_search/scroll?scroll=30s&scroll_id=$scroll_id&pretty=true\"\n```\n\nThe failing output on 0.90.10:\n\n```\n/tmp|⇒ /tmp/scrollbug.sh\nREQUEST ONE:\n* About to connect() to localhost port 9200 (#0)\n* Trying 127.0.0.1...\n* Adding handle: conn: 0x7fb832006e00\n* Adding handle: send: 0\n* Adding handle: recv: 0\n* Curl_addHandleToPipeline: length: 1\n* - Conn 0 (0x7fb832006e00) send_pipe: 1, recv_pipe: 0\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> POST /test_scroll_jj/person/_search?pretty=true&scroll=30s HTTP/1.1\n> User-Agent: curl/7.32.0\n> Host: localhost:9200\n> Accept: */*\n> Content-Length: 321\n> Content-Type: application/x-www-form-urlencoded\n>\n} [data not shown]\n* upload completely sent off: 321 out of 321 bytes\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=UTF-8\n< Content-Length: 520\n<\n{ [data not shown]\n* Connection #0 to host localhost left intact\n{ \"_scroll_id\" : \"cXVlcnlUaGVuRmV0Y2g7NTs2OkM5SXlBenNyU0lXR21uX3JsN25XcHc7NzpDOUl5QXpzclNJV0dtbl9ybDduV3B3Ozg6QzlJeUF6c3JTSVdHbW5fcmw3bldwdzs5OkM5SXlBenNyU0lXR21uX3JsN25XcHc7MTA6QzlJeUF6c3JTSVdHbW5fcmw3bldwdzswOw==\", \"took\" : 5, \"timed_out\" : false, \"_shards\" : { \"total\" : 5, \"successful\" : 5, \"failed\" : 0 }, \"hits\" : { \"total\" : 100, \"max_score\" : 1.0, \"hits\" : [ { \"_index\" : \"test_scroll_jj\", \"_type\" : \"person\", \"_id\" : \"2\", \"_score\" : 1.0 } ] } }\n\nREQUEST TWO:\n* About to connect() to localhost port 9200 (#0)\n* Trying 127.0.0.1...\n* Adding handle: conn: 0x7f8589806e00\n* Adding handle: send: 0\n* Adding handle: recv: 0\n* Curl_addHandleToPipeline: length: 1\n* - Conn 0 (0x7f8589806e00) send_pipe: 1, recv_pipe: 
0\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> GET /_search/scroll?scroll=30s&scroll_id=cXVlcnlUaGVuRmV0Y2g7NTs2OkM5SXlBenNyU0lXR21uX3JsN25XcHc7NzpDOUl5QXpzclNJV0dtbl9ybDduV3B3Ozg6QzlJeUF6c3JTSVdHbW5fcmw3bldwdzs5OkM5SXlBenNyU0lXR21uX3JsN25XcHc7MTA6QzlJeUF6c3JTSVdHbW5fcmw3bldwdzswOw==&pretty=true HTTP/1.1\n> User-Agent: curl/7.32.0\n> Host: localhost:9200\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=UTF-8\n< Content-Length: 410\n<\n{\n \"_scroll_id\" : \"cXVlcnlUaGVuRmV0Y2g7NTs2OkM5SXlBenNyU0lXR21uX3JsN25XcHc7NzpDOUl5QXpzclNJV0dtbl9ybDduV3B3Ozg6QzlJeUF6c3JTSVdHbW5fcmw3bldwdzs5OkM5SXlBenNyU0lXR21uX3JsN25XcHc7MTA6QzlJeUF6c3JTSVdHbW5fcmw3bldwdzswOw==\",\n \"took\" : 0,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 0,\n \"max_score\" : null,\n \"hits\" : [ ]\n }\n}\n```\n\nAnd the expected output as per 0.90.5:\n\n```\n/tmp|⇒ /tmp/scrollbug.sh\nREQUEST ONE:\n* About to connect() to localhost port 9200 (#0)\n* Trying 127.0.0.1...\n* Adding handle: conn: 0x7fd04a006e00\n* Adding handle: send: 0\n* Adding handle: recv: 0\n* Curl_addHandleToPipeline: length: 1\n* - Conn 0 (0x7fd04a006e00) send_pipe: 1, recv_pipe: 0\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> POST /test_scroll_jj/person/_search?pretty=true&scroll=30s HTTP/1.1\n> User-Agent: curl/7.32.0\n> Host: localhost:9200\n> Accept: */*\n> Content-Length: 321\n> Content-Type: application/x-www-form-urlencoded\n>\n} [data not shown]\n* upload completely sent off: 321 out of 321 bytes\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=UTF-8\n< Content-Length: 523\n<\n{ [data not shown]\n* Connection #0 to host localhost left intact\n{ \"_scroll_id\" : \"cXVlcnlUaGVuRmV0Y2g7NTsyMTpILV9IUWU2MlRrUzQyd2JRYzZLS3dROzIzOkgtX0hRZTYyVGtTNDJ3YlFjNktLd1E7MjI6SC1fSFFlNjJUa1M0MndiUWM2S0t3UTsyNDpILV9IUWU2MlRrUzQyd2JRYzZLS3dROzI1OkgtX0hRZTYyVGtTNDJ3YlFjNktLd1E7MDs=\", \"took\" : 9, \"timed_out\" : false, \"_shards\" : { \"total\" : 5, \"successful\" : 5, \"failed\" : 0 }, \"hits\" : { \"total\" : 100, \"max_score\" : 1.0, \"hits\" : [ { \"_index\" : \"test_scroll_jj\", \"_type\" : \"person\", \"_id\" : \"2\", \"_score\" : 1.0 } ] } }\n\nREQUEST TWO:\n* About to connect() to localhost port 9200 (#0)\n* Trying 127.0.0.1...\n* Adding handle: conn: 0x7f8a92006e00\n* Adding handle: send: 0\n* Adding handle: recv: 0\n* Curl_addHandleToPipeline: length: 1\n* - Conn 0 (0x7f8a92006e00) send_pipe: 1, recv_pipe: 0\n* Connected to localhost (127.0.0.1) port 9200 (#0)\n> GET /_search/scroll?scroll=30s&scroll_id=cXVlcnlUaGVuRmV0Y2g7NTsyMTpILV9IUWU2MlRrUzQyd2JRYzZLS3dROzIzOkgtX0hRZTYyVGtTNDJ3YlFjNktLd1E7MjI6SC1fSFFlNjJUa1M0MndiUWM2S0t3UTsyNDpILV9IUWU2MlRrUzQyd2JRYzZLS3dROzI1OkgtX0hRZTYyVGtTNDJ3YlFjNktLd1E7MDs=&pretty=true HTTP/1.1\n> User-Agent: curl/7.32.0\n> Host: localhost:9200\n> Accept: */*\n>\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=UTF-8\n< Content-Length: 523\n<\n{\n \"_scroll_id\" : \"cXVlcnlUaGVuRmV0Y2g7NTsyMTpILV9IUWU2MlRrUzQyd2JRYzZLS3dROzIzOkgtX0hRZTYyVGtTNDJ3YlFjNktLd1E7MjI6SC1fSFFlNjJUa1M0MndiUWM2S0t3UTsyNDpILV9IUWU2MlRrUzQyd2JRYzZLS3dROzI1OkgtX0hRZTYyVGtTNDJ3YlFjNktLd1E7MDs=\",\n \"took\" : 2,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 100,\n \"max_score\" : 1.0,\n \"hits\" : [ {\n \"_index\" : \"test_scroll_jj\",\n \"_type\" : \"person\",\n \"_id\" : \"7\",\n \"_score\" : 1.0\n } ]\n }\n}\n```\n", 
"comments": [ { "body": "I've written this up as a an integration test and used git bisect to try and track down where this broke between 0.90.5 and 0.90.6. It looks like this commit 9950e4440aeea76fc71b5ee534e15d4bfb1d73ed seems to be the culprit.\n\nThis is the test method I'm using.\n\n``` java\n @Test\n public void simpleScrolledHasChildFilteredQuery() throws Exception {\n client().admin().indices().prepareCreate(\"test\")\n .setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1).put(\"index.number_of_replicas\", 0))\n .execute().actionGet();\n client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n client().admin()\n .indices()\n .preparePutMapping(\"test\")\n .setType(\"child\")\n .setSource(\n jsonBuilder().startObject().startObject(\"child\").startObject(\"_parent\").field(\"type\", \"parent\").endObject()\n .endObject().endObject()).execute().actionGet();\n\n\n for (int i = 0; i < 10; i++) {\n client().prepareIndex(\"test\", \"parent\", \"p\" + i).setSource(\"{}\").execute().actionGet();\n client().prepareIndex(\"test\", \"child\", \"c\" + i).setSource(\"{}\").setParent(\"p\" + i).execute().actionGet();\n }\n\n client().admin().indices().prepareRefresh().execute().actionGet();\n\n final SearchResponse scrollResponse = client().prepareSearch(\"test\")\n .setScroll(TimeValue.timeValueSeconds(30))\n .setSize(1)\n .addField(\"_id\")\n .setTypes(\"parent\")\n .setQuery(filteredQuery(matchAllQuery(), FilterBuilders.hasChildFilter(\"child\", matchAllQuery())))\n .execute()\n .actionGet();\n\n final SearchResponse firstScroll = client().prepareSearchScroll(scrollResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).execute().actionGet();\n final SearchResponse secondScroll = client().prepareSearchScroll(firstScroll.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).execute().actionGet();\n\n client().prepareClearScroll().addScrollId(secondScroll.getScrollId()).execute().actionGet();\n\n assertThat(scrollResponse.getFailedShards(), equalTo(0));\n assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));\n\n assertThat(firstScroll.getFailedShards(), equalTo(0));\n assertThat(firstScroll.getHits().getHits().length, equalTo(1));\n\n assertThat(secondScroll.getFailedShards(), equalTo(0));\n assertThat(secondScroll.getHits().getHits().length, equalTo(1));\n }\n```\n", "created_at": "2014-01-13T23:12:16Z" }, { "body": "Nice catch! \n\nI further looked into this issue and this error only seems to occur with the has_child or has_parent filter, but not with the has_child / has_parent query.\n", "created_at": "2014-01-14T22:57:05Z" }, { "body": "Thanks for fixing it.\n", "created_at": "2014-01-15T22:05:56Z" }, { "body": "This issue seems to still happening in 6.2? Different API but not results are fetched when combining post filter with scrolling", "created_at": "2018-06-13T16:07:50Z" } ], "number": 4703, "title": "Scrolling with has_child filter returns no hits on 2nd request" }
{ "body": "Consume the entire weight and pre compute the DocIdSets for all segments instead of keeping the weight around and build a DocIdSet when a segment is being processed. This fixes issues where the has_child / has_parent filter produce no results or errors on subsequent scan requests.\n\nAlso made CustomQueryWrappingFilter implement Releasable in order to cleanup the pre-computed DocIdSets.\n\nCloses #4703\n", "number": 4773, "review_comments": [], "title": "Fix for p/c filters in scan api" }
{ "commits": [ { "message": "Consume the entire weight and pre compute the DocIdSets for all segments instead of keeping the weight around and build a DocIdSet when a segment is being processed. This fixes issues where the has_child / has_parent filter produce no results or errors on subsequent scan requests.\n\nAlso made CustomQueryWrappingFilter implement Releasable in order to cleanup the pre-computed DocIdSets.\n\nCloses #4703" } ], "files": [ { "diff": "@@ -18,25 +18,30 @@\n */\n package org.elasticsearch.index.search.child;\n \n+import org.apache.lucene.index.AtomicReader;\n import org.apache.lucene.index.AtomicReaderContext;\n import org.apache.lucene.search.*;\n import org.apache.lucene.util.Bits;\n+import org.elasticsearch.ElasticsearchException;\n+import org.elasticsearch.common.lease.Releasable;\n+import org.elasticsearch.common.lucene.docset.DocIdSets;\n import org.elasticsearch.search.internal.SearchContext;\n \n import java.io.IOException;\n+import java.util.IdentityHashMap;\n \n /**\n * Forked from {@link QueryWrapperFilter} to make sure the weight is only created once.\n * This filter should never be cached! This filter only exists for internal usage.\n *\n * @elasticsearch.internal\n */\n-public class CustomQueryWrappingFilter extends Filter {\n+public class CustomQueryWrappingFilter extends Filter implements Releasable {\n \n private final Query query;\n \n private IndexSearcher searcher;\n- private Weight weight;\n+ private IdentityHashMap<AtomicReader, DocIdSet> docIdSets;\n \n /** Constructs a filter which only matches documents matching\n * <code>query</code>.\n@@ -54,24 +59,43 @@ public final Query getQuery() {\n \n @Override\n public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {\n- SearchContext searchContext = SearchContext.current();\n- if (weight == null) {\n+ final SearchContext searchContext = SearchContext.current();\n+ if (docIdSets == null) {\n assert searcher == null;\n IndexSearcher searcher = searchContext.searcher();\n- weight = searcher.createNormalizedWeight(query);\n+ docIdSets = new IdentityHashMap<AtomicReader, DocIdSet>();\n this.searcher = searcher;\n+ searchContext.addReleasable(this);\n+\n+ final Weight weight = searcher.createNormalizedWeight(query);\n+ for (final AtomicReaderContext leaf : searcher.getTopReaderContext().leaves()) {\n+ final DocIdSet set = DocIdSets.toCacheable(leaf.reader(), new DocIdSet() {\n+ @Override\n+ public DocIdSetIterator iterator() throws IOException {\n+ return weight.scorer(leaf, true, false, null);\n+ }\n+ @Override\n+ public boolean isCacheable() { return false; }\n+ });\n+ docIdSets.put(leaf.reader(), set);\n+ }\n } else {\n assert searcher == SearchContext.current().searcher();\n }\n+ final DocIdSet set = docIdSets.get(context.reader());\n+ if (set != null && acceptDocs != null) {\n+ return BitsFilteredDocIdSet.wrap(set, acceptDocs);\n+ }\n+ return set;\n+ }\n \n- return new DocIdSet() {\n- @Override\n- public DocIdSetIterator iterator() throws IOException {\n- return weight.scorer(context, true, false, acceptDocs);\n- }\n- @Override\n- public boolean isCacheable() { return false; }\n- };\n+ @Override\n+ public boolean release() throws ElasticsearchException {\n+ // We need to clear the docIdSets, otherwise this is leaved unused\n+ // DocIdSets around and can potentially become a memory leak.\n+ docIdSets = null;\n+ searcher = null;\n+ return true;\n }\n \n @Override\n@@ -81,9 +105,15 @@ public String toString() {\n \n @Override\n public boolean 
equals(Object o) {\n- if (!(o instanceof CustomQueryWrappingFilter))\n- return false;\n- return this.query.equals(((CustomQueryWrappingFilter)o).query);\n+ if (o == this) {\n+ return true;\n+ }\n+ if (o != null && o instanceof CustomQueryWrappingFilter &&\n+ this.query.equals(((CustomQueryWrappingFilter)o).query)) {\n+ return true;\n+ }\n+\n+ return false;\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java", "status": "modified" }, { "diff": "@@ -33,6 +33,7 @@\n import org.elasticsearch.common.Priority;\n import org.elasticsearch.common.lucene.search.function.CombineFunction;\n import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.common.unit.TimeValue;\n import org.elasticsearch.index.mapper.MergeMappingException;\n import org.elasticsearch.index.query.*;\n import org.elasticsearch.index.search.child.ScoreType;\n@@ -2170,6 +2171,61 @@ public void testAdd_ParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc()\n assertSearchHits(searchResponse, \"c1\");\n }\n \n+ @Test\n+ public void testParentChildQueriesViaScrollApi() throws Exception {\n+ client().admin().indices().prepareCreate(\"test\")\n+ .setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1).put(\"index.number_of_replicas\", 0))\n+ .execute().actionGet();\n+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n+ client().admin()\n+ .indices()\n+ .preparePutMapping(\"test\")\n+ .setType(\"child\")\n+ .setSource(\n+ jsonBuilder().startObject().startObject(\"child\").startObject(\"_parent\").field(\"type\", \"parent\").endObject()\n+ .endObject().endObject()).execute().actionGet();\n+\n+\n+ for (int i = 0; i < 10; i++) {\n+ client().prepareIndex(\"test\", \"parent\", \"p\" + i).setSource(\"{}\").execute().actionGet();\n+ client().prepareIndex(\"test\", \"child\", \"c\" + i).setSource(\"{}\").setParent(\"p\" + i).execute().actionGet();\n+ }\n+\n+ client().admin().indices().prepareRefresh().execute().actionGet();\n+\n+ QueryBuilder[] queries = new QueryBuilder[]{\n+ hasChildQuery(\"child\", matchAllQuery()),\n+ filteredQuery(matchAllQuery(), hasChildFilter(\"child\", matchAllQuery())),\n+ hasParentQuery(\"parent\", matchAllQuery()),\n+ filteredQuery(matchAllQuery(), hasParentFilter(\"parent\", matchAllQuery())),\n+ topChildrenQuery(\"child\", matchAllQuery()).factor(10)\n+ };\n+\n+ for (QueryBuilder query : queries) {\n+ SearchResponse scrollResponse = client().prepareSearch(\"test\")\n+ .setScroll(TimeValue.timeValueSeconds(30))\n+ .setSize(1)\n+ .addField(\"_id\")\n+ .setQuery(query)\n+ .setSearchType(\"scan\")\n+ .execute()\n+ .actionGet();\n+\n+ assertThat(scrollResponse.getFailedShards(), equalTo(0));\n+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));\n+\n+ int scannedDocs = 0;\n+ do {\n+ scrollResponse = client()\n+ .prepareSearchScroll(scrollResponse.getScrollId())\n+ .setScroll(TimeValue.timeValueSeconds(30)).execute().actionGet();\n+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));\n+ scannedDocs += scrollResponse.getHits().getHits().length;\n+ } while (scrollResponse.getHits().getHits().length > 0);\n+ assertThat(scannedDocs, equalTo(10));\n+ }\n+ }\n+\n private static HasChildFilterBuilder hasChildFilter(String type, QueryBuilder queryBuilder) {\n HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, queryBuilder);\n 
hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));", "filename": "src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java", "status": "modified" } ] }
{ "body": "Any filter that wraps a p/c filter (has_child & has_parent) either directly or indirectly must never be cached. \n\nThe reason behind this is that the filter-cache caches per segment reader and the p/c filters rely on executing with a top level reader. The p/c filters execute in a two phase search. The first phase collects the parent ids of any document that matches with the wrapped filter or query. The second phase iterates over all parent or child documents and checks if the parent id of each document (for parent docs this the _uid field value and child docs the _parent field value) is in the set of ids collected in the first phase. The second phase executes per segment, but the first phase executed top level. \n\nNote: p/c filters on their own can't already be cached, since the cache options are a no-op in the filter parsers.\n", "comments": [], "number": 4757, "title": "Forcefully never cache any filter that wraps a p/c filter" }
{ "body": "During query parsing if a filter is encountered that extends from NoCacheFilter then the filter will not be given to the filter cache (also not wrapped in FilterCacheFilterWrapper). Also if a filter directly or indirectly wraps a NoCacheFilter then that filter will also not be cached.\n\nThis addition is useful for p/c filters, date range filters that use `NOW` in the range expressions and perhaps other filters.\n\nRelates to #4757\n", "number": 4768, "review_comments": [], "title": "Added no-cache infrastucture the the filter cache." }
{ "commits": [ { "message": "Added no-cache infrastucture the the filter cache.\n\nDuring query parsing if a filter is encountered that extends from NoCacheFilter then the filter will not be given to the filter cache (also not wrapped in FilterCacheFilterWrapper).\nAlso if a filter directly or indirectly wraps a NoCacheFilter then that filter will also not be cached.\n\nRelates to #4757" } ], "files": [ { "diff": "@@ -19,11 +19,61 @@\n \n package org.elasticsearch.common.lucene.search;\n \n+import org.apache.lucene.index.AtomicReaderContext;\n+import org.apache.lucene.search.DocIdSet;\n import org.apache.lucene.search.Filter;\n+import org.apache.lucene.util.Bits;\n+\n+import java.io.IOException;\n \n /**\n * A marker interface for {@link org.apache.lucene.search.Filter} denoting the filter\n * as one that should not be cached, ever.\n */\n public abstract class NoCacheFilter extends Filter {\n+\n+ private static final class NoCacheFilterWrapper extends NoCacheFilter {\n+ private final Filter delegate;\n+ private NoCacheFilterWrapper(Filter delegate) {\n+ this.delegate = delegate;\n+ }\n+\n+ @Override\n+ public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {\n+ return delegate.getDocIdSet(context, acceptDocs);\n+ }\n+\n+ @Override\n+ public int hashCode() {\n+ return delegate.hashCode();\n+ }\n+\n+ @Override\n+ public boolean equals(Object obj) {\n+ if (this == obj) {\n+ return true;\n+ }\n+ if (obj instanceof NoCacheFilterWrapper) {\n+ return delegate.equals(((NoCacheFilterWrapper)obj).delegate);\n+ }\n+ return false;\n+ }\n+\n+ @Override\n+ public String toString() {\n+\n+ return \"no_cache(\" + delegate + \")\";\n+ }\n+\n+ }\n+\n+ /**\n+ * Wraps a filter in a NoCacheFilter or returns it if it already is a NoCacheFilter.\n+ */\n+ public static Filter wrap(Filter filter) {\n+ if (filter instanceof NoCacheFilter) {\n+ return filter;\n+ }\n+ return new NoCacheFilterWrapper(filter);\n+ }\n }\n\\ No newline at end of file", "filename": "src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java", "status": "modified" }, { "diff": "@@ -30,6 +30,7 @@\n import org.elasticsearch.cache.recycler.CacheRecycler;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.ParseField;\n+import org.elasticsearch.common.lucene.search.NoCacheFilter;\n import org.elasticsearch.common.xcontent.XContentParser;\n import org.elasticsearch.index.Index;\n import org.elasticsearch.index.analysis.AnalysisService;\n@@ -75,6 +76,8 @@ public static void removeTypes() {\n \n private final Index index;\n \n+ private boolean propagateNoCache = false;\n+\n IndexQueryParserService indexQueryParser;\n \n private final Map<String, Filter> namedFilters = Maps.newHashMap();\n@@ -168,6 +171,9 @@ public Filter cacheFilter(Filter filter, @Nullable CacheKeyFilter.Key cacheKey)\n if (filter == null) {\n return null;\n }\n+ if (this.propagateNoCache || filter instanceof NoCacheFilter) {\n+ return filter;\n+ }\n if (cacheKey != null) {\n filter = new CacheKeyFilter.Wrapper(filter, cacheKey);\n }\n@@ -251,7 +257,7 @@ public Filter parseInnerFilter() throws IOException, QueryParsingException {\n if (filterParser == null) {\n throw new QueryParsingException(index, \"No filter registered for [\" + filterName + \"]\");\n }\n- Filter result = filterParser.parse(this);\n+ Filter result = executeFilterParser(filterParser);\n if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) {\n // if we are at 
END_OBJECT, move to the next one...\n parser.nextToken();\n@@ -264,12 +270,17 @@ public Filter parseInnerFilter(String filterName) throws IOException, QueryParsi\n if (filterParser == null) {\n throw new QueryParsingException(index, \"No filter registered for [\" + filterName + \"]\");\n }\n+ return executeFilterParser(filterParser);\n+ }\n+\n+ private Filter executeFilterParser(FilterParser filterParser) throws IOException {\n+ final boolean propagateNoCache = this.propagateNoCache; // first safe the state that we need to restore\n+ this.propagateNoCache = false; // parse the subfilter with caching, that's fine\n Filter result = filterParser.parse(this);\n- // don't move to the nextToken in this case...\n-// if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) {\n-// // if we are at END_OBJECT, move to the next one...\n-// parser.nextToken();\n-// }\n+ // now make sure we set propagateNoCache to true if it is true already or if the result is\n+ // an instance of NoCacheFilter or if we used to be true! all filters above will\n+ // be not cached ie. wrappers of this filter!\n+ this.propagateNoCache |= (result instanceof NoCacheFilter) || propagateNoCache;\n return result;\n }\n ", "filename": "src/main/java/org/elasticsearch/index/query/QueryParseContext.java", "status": "modified" } ] }
{ "body": "If the thread pools of an elasticsearch node cannot be shutdown\nimmediately, a wait of 10 seconds is added. This clashes with the\nRPM scripts, as by default the init functions wait for 3 seconds\nfor a service to shutdown before a KILL signal is sent, resulting\nin an unclean shutdown - not from an elasticsearch point of view,\nbut from init system point of view, as some lock files are left\naround.\n\nIn order to prevent this the init script as well as the systemd\nconfiguration now feature the same timeout than the debian package,\nwhich is 20 seconds.\n\nThe await statement, which causes the 10 second delay can be found in\nInternalNode.close()\n", "comments": [ { "body": "this was fixed in d3fb28cd6bee6240ad386f05f75fcc79b266f13d\n", "created_at": "2014-02-05T12:21:18Z" } ], "number": 5020, "title": "RPMs: Add timeout to shutdown with KILL signal" }
{ "body": "If the thread pools of an elasticsearch node cannot be shutdown\nimmediately, a wait of 10 seconds is added. This clashes with the\nRPM scripts, as by default the init functions wait for 3 seconds\nfor a service to shutdown before a KILL signal is sent, resulting\nin an unclean shutdown - not from an elasticsearch point of view,\nbut from init system point of view, as some lock files are left\naround.\n\nIn order to prevent this the init script as well as the systemd\nconfiguration now feature the same timeout than the debian package,\nwhich is 20 seconds.\n\nThe await statement, which causes the 10 second delay can be found in\nInternalNode.close()\n\nCloses #5020\n", "number": 4721, "review_comments": [], "title": "RPMs: Add timeout to shutdown with KILL signal" }
{ "commits": [ { "message": "RPMs: Add timeout to shutdown with KILL signal\n\nIf the thread pools of an elasticsearch node cannot be shutdown\nimmediately, a wait of 10 seconds is added. This clashes with the\nRPM scripts, as by default the init functions wait for 3 seconds\nfor a service to shutdown before a KILL signal is sent, resulting\nin an unclean shutdown - not from an elasticsearch point of view,\nbut from init system point of view, as some lock files are left\naround.\n\nIn order to prevent this the init script as well as the systemd\nconfiguration now feature the same timeout than the debian package,\nwhich is 20 seconds.\n\nThe await statement, which causes the 10 second delay can be found in\nInternalNode.close()" } ], "files": [ { "diff": "@@ -95,7 +95,7 @@ start() {\n stop() {\n echo -n $\"Stopping $prog: \"\n # stop it here, often \"killproc $prog\"\n- killproc -p $pidfile $prog\n+ killproc -p $pidfile -d 20 $prog\n retval=$?\n echo\n [ $retval -eq 0 ] && rm -f $lockfile", "filename": "src/rpm/init.d/elasticsearch", "status": "modified" }, { "diff": "@@ -13,6 +13,8 @@ ExecStart=/usr/share/elasticsearch/bin/elasticsearch -d -p /var/run/elasticsearc\n LimitNOFILE=65535\n # See MAX_LOCKED_MEMORY in sysconfig, use \"infinity\" when MAX_LOCKED_MEMORY=unlimited and using bootstrap.mlockall: true\n #LimitMEMLOCK=infinity\n+# Shutdown delay in seconds, before process is tried to be killed with KILL (if configured)\n+TimeoutStopSec=20\n \n [Install]\n WantedBy=multi-user.target", "filename": "src/rpm/systemd/elasticsearch.service", "status": "modified" } ] }
{ "body": "```\ncurl -XDELETE localhost:9200/test\ncurl -XPUT localhost:9200/test/test/1 -d '{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }'\ncurl localhost:9200/test/test/1/_source\n```\n\nReturns `{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }` as expected.\n\n```\ncurl -XDELETE localhost:9200/test\ncurl -XPUT localhost:9200/test\ncurl -XPUT localhost:9200/test/test/_mapping -d '{ \"test\": { \"_source\" : { \"excludes\": [ \"ignored\" ] } } }'\ncurl -XPUT localhost:9200/test/test/1 -d '{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }'\ncurl localhost:9200/test/test/1/_source\n```\n\nReturns `{\"not_empty\":{\"key\":\"value\"}}` which is not expected.\n", "comments": [], "number": 4047, "title": "Empty objects are not stored in _source when an include/exclude list is present" }
{ "body": "When excluding '*.f1' from `{ \"obj\": { \"f1\": 1, \"f2\": 2 } }` XContentMapValues.filter returns `{ \"obj\": { \"f2\": 2}}`. When run on `{ \"obj\": { \"f1\" : 1 }}` we should return `{ \"obj\": { }}` to maintain object structure. People currently need to always check whether `obj` is there or not.\n\nCloses #4715\nCloses #4047\nRelated to #4491\n", "number": 4717, "review_comments": [], "title": "excluding all fields of an object should not remove parent." }
{ "commits": [ { "message": "excluding all fields of an object should not remove parent.\n\nWhen excluding '*.f1' from `{ \"obj\": { \"f1\": 1, \"f2\": 2 } }` XContentMapValues.filter returns `{ \"obj\": { \"f2\": 2}}`. When run on `{ \"obj\": { \"f1\" : 1 }}` we should return `{ \"obj\": { }}` to maintain object structure. People currently need to always check whether `obj` is there or not.\n\nCloses #4715\nCloses #4047\nRelated to #4491" }, { "message": "Fixed a partial field test failing due to change of behavior." }, { "message": "Move exactIncludeMatch and pathIsPrefixOfAnInclude initialization to one place for clarity." } ], "files": [ { "diff": "@@ -154,24 +154,18 @@ private static void filter(Map<String, Object> map, Map<String, Object> into, St\n }\n sb.append(key);\n String path = sb.toString();\n- boolean excluded = false;\n- for (String exclude : excludes) {\n- if (Regex.simpleMatch(exclude, path)) {\n- excluded = true;\n- break;\n- }\n- }\n- if (excluded) {\n+\n+ if (Regex.simpleMatch(excludes, path)) {\n sb.setLength(mark);\n continue;\n }\n- boolean exactIncludeMatch;\n+\n+ boolean exactIncludeMatch = false; // true if the current position was specifically mentioned\n+ boolean pathIsPrefixOfAnInclude = false; // true if potentially a sub scope can be included\n if (includes.length == 0) {\n // implied match anything\n exactIncludeMatch = true;\n } else {\n- exactIncludeMatch = false;\n- boolean pathIsPrefixOfAnInclude = false;\n for (String include : includes) {\n // check for prefix matches as well to see if we need to zero in, something like: obj1.arr1.* or *.field\n // note, this does not work well with middle matches, like obj1.*.obj3\n@@ -198,19 +192,20 @@ private static void filter(Map<String, Object> map, Map<String, Object> into, St\n break;\n }\n }\n- if (!pathIsPrefixOfAnInclude && !exactIncludeMatch) {\n- // skip subkeys, not interesting.\n- sb.setLength(mark);\n- continue;\n- }\n+ }\n+\n+ if (!(pathIsPrefixOfAnInclude || exactIncludeMatch)) {\n+ // skip subkeys, not interesting.\n+ sb.setLength(mark);\n+ continue;\n }\n \n \n if (entry.getValue() instanceof Map) {\n Map<String, Object> innerInto = Maps.newHashMap();\n // if we had an exact match, we want give deeper excludes their chance\n filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? 
Strings.EMPTY_ARRAY : includes, excludes, sb);\n- if (!innerInto.isEmpty()) {\n+ if (exactIncludeMatch || !innerInto.isEmpty()) {\n into.put(entry.getKey(), innerInto);\n }\n } else if (entry.getValue() instanceof List) {", "filename": "src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java", "status": "modified" }, { "diff": "@@ -26,14 +26,14 @@\n import org.elasticsearch.common.xcontent.XContentHelper;\n import org.elasticsearch.common.xcontent.XContentType;\n import org.elasticsearch.test.ElasticsearchTestCase;\n+import org.hamcrest.Matchers;\n import org.junit.Test;\n \n import java.util.Arrays;\n import java.util.HashMap;\n import java.util.List;\n import java.util.Map;\n \n-import static org.hamcrest.MatcherAssert.assertThat;\n import static org.hamcrest.Matchers.*;\n import static org.hamcrest.core.IsEqual.equalTo;\n \n@@ -46,6 +46,7 @@ public void testFilter() throws Exception {\n XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n .field(\"test1\", \"value1\")\n .field(\"test2\", \"value2\")\n+ .field(\"something_else\", \"value3\")\n .endObject();\n \n Map<String, Object> source = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();\n@@ -59,8 +60,9 @@ public void testFilter() throws Exception {\n assertThat(filter.get(\"test2\").toString(), equalTo(\"value2\"));\n \n filter = XContentMapValues.filter(source, Strings.EMPTY_ARRAY, new String[]{\"test1\"});\n- assertThat(filter.size(), equalTo(1));\n+ assertThat(filter.size(), equalTo(2));\n assertThat(filter.get(\"test2\").toString(), equalTo(\"value2\"));\n+ assertThat(filter.get(\"something_else\").toString(), equalTo(\"value3\"));\n \n // more complex object...\n builder = XContentFactory.jsonBuilder().startObject()\n@@ -200,20 +202,6 @@ public void testExtractRawValue() throws Exception {\n assertThat(XContentMapValues.extractRawValues(\"path1.xxx.path2.yyy.test\", map).get(0).toString(), equalTo(\"value\"));\n }\n \n- @Test\n- public void testThatFilteringWithNestedArrayAndExclusionWorks() throws Exception {\n- XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n- .startArray(\"coordinates\")\n- .startArray().value(\"foo\").endArray()\n- .endArray()\n- .endObject();\n-\n- Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n- Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"nonExistingField\"});\n-\n- assertThat(mapTuple.v2(), equalTo(filteredSource));\n- }\n-\n @Test\n public void prefixedNamesFilteringTest() {\n Map<String, Object> map = new HashMap<String, Object>();\n@@ -368,4 +356,101 @@ public void filterWithEmptyIncludesExcludes() {\n assertThat(filteredMap.get(\"field\").toString(), equalTo(\"value\"));\n \n }\n+\n+ @SuppressWarnings({\"unchecked\"})\n+ @Test\n+ public void testThatFilterIncludesEmptyObjectWhenUsingIncludes() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(mapTuple.v2(), equalTo(filteredSource));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesEmptyObjectWhenUsingExcludes() throws Exception {\n+ XContentBuilder builder = 
XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"nonExistingField\"});\n+\n+ assertThat(mapTuple.v2(), equalTo(filteredSource));\n+ }\n+\n+ @Test\n+ public void testNotOmittingObjectsWithExcludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .field(\"f1\", \"v1\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"obj.f1\"});\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj\"));\n+ assertThat(((Map) filteredSource.get(\"obj\")).size(), equalTo(0));\n+ }\n+\n+ @SuppressWarnings({\"unchecked\"})\n+ @Test\n+ public void testNotOmittingObjectWithNestedExcludedObject() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .startObject(\"obj2\")\n+ .startObject(\"obj3\")\n+ .endObject()\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ // implicit include\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"*.obj2\"});\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj1\"));\n+ assertThat(((Map) filteredSource.get(\"obj1\")).size(), Matchers.equalTo(0));\n+\n+ // explicit include\n+ filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj1\"}, new String[]{\"*.obj2\"});\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj1\"));\n+ assertThat(((Map) filteredSource.get(\"obj1\")).size(), Matchers.equalTo(0));\n+\n+ // wild card include\n+ filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"*.obj2\"}, new String[]{\"*.obj3\"});\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj1\"));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"obj1\")), hasKey(\"obj2\"));\n+ assertThat(((Map) ((Map) filteredSource.get(\"obj1\")).get(\"obj2\")).size(), Matchers.equalTo(0));\n+ }\n+\n+ @SuppressWarnings({\"unchecked\"})\n+ @Test\n+ public void testIncludingObjectWithNestedIncludedObject() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .startObject(\"obj2\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"*.obj2\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj1\"));\n+ assertThat(((Map) filteredSource.get(\"obj1\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"obj1\")), hasKey(\"obj2\"));\n+ assertThat(((Map) ((Map) filteredSource.get(\"obj1\")).get(\"obj2\")).size(), equalTo(0));\n+ }\n }", "filename": 
"src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java", "status": "modified" }, { "diff": "@@ -253,7 +253,7 @@ public void testPartialFields() throws Exception {\n \n SearchResponse response = client().prepareSearch(\"test\")\n .addPartialField(\"partial1\", \"obj1.arr1.*\", null)\n- .addPartialField(\"partial2\", null, \"obj1.*\")\n+ .addPartialField(\"partial2\", null, \"obj1\")\n .execute().actionGet();\n assertThat(\"Failures \" + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0));\n ", "filename": "src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java", "status": "modified" } ] }
{ "body": "Multi data path config support writes a file to a data location based on the available size (by default). There is a Lucene file called `segments.gen` that has the same name, and only in that case, we need to make sure we alway write it to the same data location, otherwise, the index will have multiple segments.gen files, and the shard can seem to be corrupted.\n\nThe message if this case happens is that segments_xxx file was not found, in which case, a find for segments.gen can yield multiple files. Deleting the segments.gen files will cause the shard to recover properly (as its an extra protection layer to resolve the segments header by Lucene)\n", "comments": [ { "body": "Hi,\n\nI was using ES 0.90.7 and my shards were after restart of the cluster going missing. Physically, I can see the files (in 6 cases out of 8) still on the disk, but the shards won't come up with the indication of a segments_X file missing. I have followed your advice of removing the segments.gen file in order to recover, but the shards are not coming up. Even after restart of the cluster, the shards are not coming up, the error is now:\n\n[2014-01-15 18:46:00,504][DEBUG][cluster.service ] [Synch] processing [shard-failed ([1millionnewv2][4], node[fWigetX5QNar2zIpvkEK_Q], [P], s[INITIALIZING]), reas\non [Failed to start shard, message [IndexShardGatewayRecoveryException[[1millionnewv2][4] failed to fetch index version after copying it over]; nested: IndexShardGatewayRe\ncoveryException[[1millionnewv2][4] shard allocated for local recovery (post api), should exist, but doesn't, current files: [_s41.nvd, _twd_es090_0.tip, _um4.nvm,... ]]; nested: IndexNotF\noundException[no segments\\* file found in store(least_used[rate_limited(niofs(/0/elasticsearch/elasticsearch/nodes/0/indices/1millionnewv2/4/index), type=MERGE, rate=20.0),\n rate_limited(niofs(/1/elasticsearch/elasticsearch/nodes/0/indices/1millionnewv2/4/index), type=MERGE, rate=20.0), rate_limited(niofs(/2/elasticsearch/elasticsearch/nodes/\n0/indices/1millionnewv2/4/index), type=MERGE, rate=20.0), rate_limited(niofs(/3/elasticsearch/elasticsearch/nodes/0/indices/1millionnewv2/4/index), type=MERGE, rate=20.0)]\n): files: [ .... ] ]]]: no change in cluster_state\n\nWhat can I do if the usual strategy of the segments.gen file removal doesn't work? I have even updated the ES to the latest 0.90.10 version but the restart problems are still the same and the shards are not coming up. I have lost a considerable amount of data, in those 2 shards that were wiped out, but I could still save a lot of data in the 6 shards I am could recover.\n", "created_at": "2014-01-15T18:05:41Z" }, { "body": "@sbarton are you sure you deleted the segments.gen from all data directories for the relevant shard ([1millionnewv2][4]) (or, better yet, just delete it recursively across all data locations)? the failure suggests you potentially didn't.\n", "created_at": "2014-01-20T15:57:20Z" }, { "body": "@kimchy I have made sure I removed the segments.gen from all data directories (using find command) but still no luck. I at the end gave up (I had more shards in the same situation on other machines - tried the same approach but none of them came back) and re-indexed the whole thing once again. But I can say, that the 0.90.10 version is not loosing shards even after several harsh restarts.\n", "created_at": "2014-02-06T09:35:48Z" } ], "number": 4674, "title": "Multi data path config can cause a shard to be perceived as corrupted" }
{ "body": "Multi data path config support writes a file to a data location based on the available size (by default). There is a Lucene file called segments.gen that has the same name, and only in that case, we need to make sure we alway write it to the same data location, otherwise, the index will have multiple segments.gen files, and the shard can seem to be corrupted.\n\nThe message if this case happens is that segments_xxx file was not found, in which case, a find for segments.gen can yield multiple files. Deleting the segments.gen files will cause the shard to recover properly (as its an extra protection layer to resolve the segments header by Lucene)\n\nMake sure the segments.gen file is writtne to the same directory every time\nfixes #4674\n", "number": 4676, "review_comments": [], "title": "Multi data path config can cause a shard to be perceived as corrupted" }
{ "commits": [ { "message": "Multi data path config can cause a shard to be perceived as corrupted\nMulti data path config support writes a file to a data location based on the available size (by default). There is a Lucene file called segments.gen that has the same name, and only in that case, we need to make sure we alway write it to the same data location, otherwise, the index will have multiple segments.gen files, and the shard can seem to be corrupted.\n\nThe message if this case happens is that segments_xxx file was not found, in which case, a find for segments.gen can yield multiple files. Deleting the segments.gen files will cause the shard to recover properly (as its an extra protection layer to resolve the segments header by Lucene)\n\nMake sure the segments.gen file is writtne to the same directory every time\nfixes #4674" } ], "files": [ { "diff": "@@ -22,6 +22,7 @@\n import com.google.common.collect.ImmutableList;\n import com.google.common.collect.ImmutableMap;\n import com.google.common.collect.Maps;\n+import org.apache.lucene.index.IndexFileNames;\n import org.apache.lucene.store.*;\n import org.apache.lucene.util.IOUtils;\n import org.elasticsearch.common.Nullable;\n@@ -426,7 +427,9 @@ public IndexOutput createOutput(String name, IOContext context) throws IOExcepti\n public IndexOutput createOutput(String name, IOContext context, boolean raw) throws IOException {\n ensureOpen();\n Directory directory;\n- if (isChecksum(name)) {\n+ // we want to write the segments gen file to the same directory *all* the time\n+ // to make sure we don't create multiple copies of it\n+ if (isChecksum(name) || IndexFileNames.SEGMENTS_GEN.equals(name)) {\n directory = distributor.primary();\n } else {\n directory = distributor.any();\n@@ -441,7 +444,7 @@ public IndexOutput createOutput(String name, IOContext context, boolean raw) thr\n boolean computeChecksum = !raw;\n if (computeChecksum) {\n // don't compute checksum for segment based files\n- if (\"segments.gen\".equals(name) || name.startsWith(\"segments\")) {\n+ if (IndexFileNames.SEGMENTS_GEN.equals(name) || name.startsWith(IndexFileNames.SEGMENTS)) {\n computeChecksum = false;\n }\n }\n@@ -562,7 +565,7 @@ public void sync(Collection<String> names) throws IOException {\n }\n for (String name : names) {\n // write the checksums file when we sync on the segments file (committed)\n- if (!name.equals(\"segments.gen\") && name.startsWith(\"segments\")) {\n+ if (!name.equals(IndexFileNames.SEGMENTS_GEN) && name.startsWith(IndexFileNames.SEGMENTS)) {\n writeChecksums();\n break;\n }", "filename": "src/main/java/org/elasticsearch/index/store/Store.java", "status": "modified" } ] }
{ "body": "If a `type` or `path` is missing in the REST test yaml file, it is automatically replaced with `_all`. This makes it hard to test changes in the api, for example adding the possibility to leave the index blank in addition to `_all` and `*` in the uri. \n", "comments": [], "number": 4657, "title": "remove default `_all` for `type` and `index` if these are missing in REST tests" }
{ "body": "...T tests\n\nIf a type or path is missing in the REST test yaml file, it is\nautomatically replaced with _all. This makes it hard to test changes\nin the api, for example adding the possibility to leave the index\nblank in addition to _all and \\* in the uri.\n\ncloses #4657\n", "number": 4658, "review_comments": [], "title": "remove default `_all` for `type` and `index` if these are missing in RES..." }
{ "commits": [ { "message": "remove default `_all` for `type` and `index` if these are missing in REST tests\n\nIf a type or path is missing in the REST test yaml file, it is\nautomatically replaced with _all. This makes it hard to test changes\nin the api, for example adding the possibility to leave the index\nblank in addition to _all and * in the uri.\n\ncloses #4657" }, { "message": "remove TODO,that never happens" } ], "files": [ { "diff": "@@ -18,8 +18,7 @@\n },\n \"type\": {\n \"type\" : \"string\",\n- \"required\" : false,\n- \"default\" : \"_all\",\n+ \"required\" : true,\n \"description\" : \"The type of the document (use `_all` to fetch the first document matching the ID across all types)\"\n }\n },", "filename": "rest-api-spec/api/exists.json", "status": "modified" }, { "diff": "@@ -18,8 +18,7 @@\n },\n \"type\": {\n \"type\" : \"string\",\n- \"required\" : false,\n- \"default\" : \"_all\",\n+ \"required\" : true,\n \"description\" : \"The type of the document (use `_all` to fetch the first document matching the ID across all types)\"\n }\n },", "filename": "rest-api-spec/api/get.json", "status": "modified" }, { "diff": "@@ -18,8 +18,7 @@\n },\n \"type\": {\n \"type\" : \"string\",\n- \"required\" : false,\n- \"default\" : \"_all\",\n+ \"required\" : true,\n \"description\" : \"The type of the document; use `_all` to fetch the first document matching the ID across all types\"\n }\n },", "filename": "rest-api-spec/api/get_source.json", "status": "modified" }, { "diff": "@@ -8,7 +8,6 @@\n \"parts\": {\n \"index\": {\n \"type\" : \"list\",\n- \"default\" : \"_all\",\n \"description\" : \"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices\"\n },\n \"type\": {", "filename": "rest-api-spec/api/search.json", "status": "modified" }, { "diff": "@@ -11,6 +11,7 @@\n - do:\n exists:\n index: test_1\n+ type: _all\n id: 1\n \n - is_true: ''", "filename": "rest-api-spec/test/exists/70_defaults.yaml", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n - do:\n get:\n index: test_1\n+ type: _all\n id: 中文\n \n - match: { _index: test_1 }", "filename": "rest-api-spec/test/get/10_basic.yaml", "status": "modified" }, { "diff": "@@ -11,6 +11,7 @@\n - do:\n get:\n index: test_1\n+ type: _all\n id: 1\n \n - match: { _index: test_1 }", "filename": "rest-api-spec/test/get/15_default_values.yaml", "status": "modified" }, { "diff": "@@ -18,6 +18,7 @@\n - do:\n get_source:\n index: test_1\n+ type: _all\n id: 1\n \n - match: { '': { foo: bar } }", "filename": "rest-api-spec/test/get_source/10_basic.yaml", "status": "modified" }, { "diff": "@@ -10,6 +10,7 @@\n - do:\n get_source:\n index: test_1\n+ type: _all\n id: 1\n \n - match: { '': { foo: bar } }", "filename": "rest-api-spec/test/get_source/15_default_values.yaml", "status": "modified" }, { "diff": "@@ -26,7 +26,8 @@\n \n - do:\n search:\n- type: test\n+ index: _all\n+ type: test\n body:\n query:\n match:", "filename": "rest-api-spec/test/search/20_default_values.yaml", "status": "modified" }, { "diff": "@@ -36,8 +36,6 @@\n */\n public class RestApi {\n \n- private static final String ALL = \"_all\";\n-\n private final String name;\n private List<String> methods = Lists.newArrayList();\n private List<String> paths = Lists.newArrayList();\n@@ -123,18 +121,18 @@ public String getFinalPath(Map<String, String> pathParams) {\n RestPath matchingRestPath = findMatchingRestPath(pathParams.keySet());\n String path = matchingRestPath.path;\n for (Map.Entry<String, String> paramEntry : 
matchingRestPath.params.entrySet()) {\n- //replace path placeholders with actual values\n+ // replace path placeholders with actual values\n String value = pathParams.get(paramEntry.getValue());\n if (value == null) {\n- //there might be additional placeholder to replace, not available as input params\n- //it can only be {index} or {type} to be replaced with _all\n- if (paramEntry.getValue().equals(\"index\") || paramEntry.getValue().equals(\"type\")) {\n- value = ALL;\n- } else {\n- throw new IllegalArgumentException(\"path [\" + path + \"] contains placeholders that weren't replaced with proper values\");\n- }\n+ // if a value is missing, we got the wrong path or the test was\n+ // specified incorrectly\n+ // TODO: What if more than one path exists? for example: PUT\n+ // index/type/_mapping vs. PUT index/_maping/type? Should we\n+ // randomize?\n+ throw new IllegalArgumentException(\"parameter [\" + paramEntry.getValue() + \"] missing\");\n+ } else {\n+ path = path.replace(paramEntry.getKey(), value);\n }\n- path = path.replace(paramEntry.getKey(), value);\n }\n return path;\n }", "filename": "src/test/java/org/elasticsearch/test/rest/spec/RestApi.java", "status": "modified" } ] }
{ "body": "Sometimes if there are very small number of nodes compared to large number of shards and indices deltas between nodes are very close to the default threshold `1.0` but due to the fact that we use floats we might end up with a weight of `1.000000001` which then in-turn triggers a relocation which is unnecessary and is kind of `undone` in the next iteration due to the same issue. \n", "comments": [], "number": 4630, "title": "BalancedShardAllocator might trigger unnecessary relocation under rare circumstances if deltas are very close to the threshold due to rounding issues." }
{ "body": "Adding a small value to the threshold prevents weight deltas that are\nvery very close to the threshold to not trigger relocations. These\ndeltas can be rounding errors that lead to unnecessary relocations. In\npractice this might only happen under very rare circumstances.\nIn general it's a good idea for the shard allocator to be a bit\nmore conservative in terms of rebalancing since in general relocation\ncosts are pretty high.\n\nCloses #4630\n", "number": 4631, "review_comments": [ { "body": "I've noticed these flip flopping some. I think someone had \"use star if >3 imports per package\" and I ended up setting that. I see you do the more normal \"never use star\". This isn't a big deal and I dunno if it is worth having a standard. The only real way to enforce these standards is with something like checkstyle because reviewers forget.\n", "created_at": "2014-01-06T15:19:37Z" }, { "body": "we try to be practical when it comes to code style... some things we're strict about, other less. I think with today's idea's the import styles are quite meaningless (I personally have time folded all the time and never find the need to look at them), so whether one uses \\* or use explicit imports... I think it's really not a big deal (I think I personally left the default intellij behaviour where imports are collapsed above a certain threshold)\n", "created_at": "2014-01-06T15:45:24Z" }, { "body": "Hmm, a fuzzy `lessThan` here may move us further from the balance... Maybe it can have bad effects?\n", "created_at": "2014-01-06T15:57:25Z" }, { "body": "When I first started contributing that was something I thought about. I tried to puzzle out what setting would cause the least churn and still let Eclipse handle all the imports for me. I set the setting and then pretty much ignored it.\n", "created_at": "2014-01-06T15:58:51Z" }, { "body": "this loop is only to find the shard that makes the biggest impact the decision that we rebalace is already made. I don't think it can have any bad effect here but it won't make a difference either so I will just remove it.\n", "created_at": "2014-01-06T16:09:22Z" } ], "title": "Use a tolerance to decide if a value is less than the threshold" }
{ "commits": [ { "message": "Use a tolerance to decide if a value is less than the threshold\n\nAdding a small value to the threshold prevents weight deltas that are\nvery very close to the threshold to not trigger relocations. These\ndeltas can be rounding errors that lead to unnecessary relocations. In\npractice this might only happen under very rare circumstances.\nIn general it's a good idea for the shard allocator to be a bit\nmore conversavtive in terms of rebalancing since in general relocation\ncosts are pretty high.\n\nCloses #4630" } ], "files": [ { "diff": "@@ -24,7 +24,10 @@\n import org.apache.lucene.util.IntroSorter;\n import org.elasticsearch.ElasticSearchIllegalArgumentException;\n import org.elasticsearch.cluster.metadata.MetaData;\n-import org.elasticsearch.cluster.routing.*;\n+import org.elasticsearch.cluster.routing.MutableShardRouting;\n+import org.elasticsearch.cluster.routing.RoutingNode;\n+import org.elasticsearch.cluster.routing.RoutingNodes;\n+import org.elasticsearch.cluster.routing.ShardRoutingState;\n import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;\n import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;\n import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;\n@@ -345,6 +348,13 @@ private boolean initialize(RoutingNodes routing, RoutingNodes.UnassignedShards u\n return allocateUnassigned(unassigned, routing.ignoredUnassigned());\n }\n \n+ private static boolean lessThan(float delta, float threshold) {\n+ /* deltas close to the threshold are \"rounded\" to the threshold manually\n+ to prevent floating point problems if the delta is very close to the\n+ threshold ie. 1.000000002 which can trigger unnecessary balance actions*/\n+ return delta <= threshold + 0.001f;\n+ }\n+\n /**\n * Balances the nodes on the cluster model according to the weight\n * function. The configured threshold is the minimum delta between the\n@@ -384,8 +394,8 @@ public boolean balance() {\n advance_range:\n if (maxNode.numShards(index) > 0) {\n float delta = weights[highIdx] - weights[lowIdx];\n- delta = delta <= threshold ? delta : sorter.weight(Operation.THRESHOLD_CHECK, maxNode) - sorter.weight(Operation.THRESHOLD_CHECK, minNode);\n- if (delta <= threshold) {\n+ delta = lessThan(delta, threshold) ? 
delta : sorter.weight(Operation.THRESHOLD_CHECK, maxNode) - sorter.weight(Operation.THRESHOLD_CHECK, minNode);\n+ if (lessThan(delta, threshold)) {\n if (lowIdx > 0 && highIdx-1 > 0 // is there a chance for a higher delta?\n && (weights[highIdx-1] - weights[0] > threshold) // check if we need to break at all\n ) {\n@@ -412,7 +422,7 @@ public boolean balance() {\n maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta);\n }\n /* pass the delta to the replication function to prevent relocations that only swap the weights of the two nodes.\n- * a relocation must bring us closer to the balance if we only achive the same delta the relocation is useless */\n+ * a relocation must bring us closer to the balance if we only achieve the same delta the relocation is useless */\n if (tryRelocateShard(Operation.BALANCE, minNode, maxNode, index, delta)) {\n /*\n * TODO we could be a bit smarter here, we don't need to fully sort necessarily", "filename": "src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java", "status": "modified" }, { "diff": "@@ -34,7 +34,9 @@\n import org.elasticsearch.test.ElasticsearchAllocationTestCase;\n import org.junit.Test;\n \n-import java.util.*;\n+import java.util.ArrayList;\n+import java.util.Collections;\n+import java.util.List;\n \n import static org.elasticsearch.cluster.routing.ShardRoutingState.*;\n import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n@@ -209,7 +211,7 @@ public void testRandom() {\n nodesBuilder.put(node);\n }\n clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();\n- clusterState = stabelize(clusterState, service);\n+ clusterState = stabilize(clusterState, service);\n }\n }\n \n@@ -247,28 +249,28 @@ public void testRollingRestart() {\n .put(newNode(\"old0\", getPreviousVersion()))\n .put(newNode(\"old1\", getPreviousVersion()))\n .put(newNode(\"old2\", getPreviousVersion()))).build();\n- clusterState = stabelize(clusterState, service);\n+ clusterState = stabilize(clusterState, service);\n \n clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()\n .put(newNode(\"old0\", getPreviousVersion()))\n .put(newNode(\"old1\", getPreviousVersion()))\n .put(newNode(\"new0\"))).build();\n \n- clusterState = stabelize(clusterState, service);\n+ clusterState = stabilize(clusterState, service);\n \n clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()\n .put(newNode(\"node0\", getPreviousVersion()))\n .put(newNode(\"new1\"))\n .put(newNode(\"new0\"))).build();\n \n- clusterState = stabelize(clusterState, service);\n+ clusterState = stabilize(clusterState, service);\n \n clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()\n .put(newNode(\"new2\"))\n .put(newNode(\"new1\"))\n .put(newNode(\"new0\"))).build();\n \n- clusterState = stabelize(clusterState, service);\n+ clusterState = stabilize(clusterState, service);\n routingTable = clusterState.routingTable();\n for (int i = 0; i < routingTable.index(\"test\").shards().size(); i++) {\n assertThat(routingTable.index(\"test\").shard(i).shards().size(), equalTo(3));\n@@ -281,38 +283,30 @@ public void testRollingRestart() {\n }\n }\n \n- private ClusterState stabelize(ClusterState clusterState, AllocationService service) {\n+ private ClusterState stabilize(ClusterState clusterState, AllocationService service) {\n logger.trace(\"RoutingNodes: {}\", clusterState.routingNodes().prettyPrint());\n \n RoutingTable 
routingTable = service.reroute(clusterState).routingTable();\n clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n RoutingNodes routingNodes = clusterState.routingNodes();\n assertRecoveryNodeVersions(routingNodes);\n \n- logger.info(\"start all the primary shards, replicas will start initializing\");\n- routingNodes = clusterState.routingNodes();\n- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();\n- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n- routingNodes = clusterState.routingNodes();\n- assertRecoveryNodeVersions(routingNodes);\n-\n- logger.info(\"start the replica shards\");\n- routingNodes = clusterState.routingNodes();\n- routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();\n- clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n- routingNodes = clusterState.routingNodes();\n logger.info(\"complete rebalancing\");\n RoutingTable prev = routingTable;\n- while (true) {\n+ boolean stable = false;\n+ for (int i = 0; i < 1000; i++) { // at most 200 iters - this should be enough for all tests\n logger.trace(\"RoutingNodes: {}\", clusterState.getRoutingNodes().prettyPrint());\n routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();\n clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n routingNodes = clusterState.routingNodes();\n- if (routingTable == prev)\n+ if (stable = (routingTable == prev)) {\n break;\n+ }\n assertRecoveryNodeVersions(routingNodes);\n prev = routingTable;\n }\n+ logger.info(\"stabilized success [{}]\", stable);\n+ assertThat(stable, is(true));\n return clusterState;\n }\n ", "filename": "src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java", "status": "modified" } ] }
{ "body": "Double wildcards with non-matching index pattern can cause [Regex#simpleMatch](https://github.com/elasticsearch/elasticsearch/blob/6a04c169326ab99c1e5b4eef6f9fdbed222b5fa0/src/main/java/org/elasticsearch/common/regex/Regex.java#L81) to go into infinite loop. To reproduce, call `Regex.simpleMatch(\"**ddd\", \"fff\")`.\n", "comments": [ { "body": "LGTM\n", "created_at": "2014-01-03T18:57:17Z" } ], "number": 4610, "title": "Double wildcards in the the index name can cause a request to hang" }
{ "body": "Fixes #4610\n", "number": 4611, "review_comments": [], "title": "Fix potential infinite loop in double wildcard processing" }
{ "commits": [ { "message": "Fix potential infinite loop in double wildcard processing\n\nFixes #4610" } ], "files": [ { "diff": "@@ -71,6 +71,9 @@ public static boolean simpleMatch(String pattern, String str) {\n int nextIndex = pattern.indexOf('*', firstIndex + 1);\n if (nextIndex == -1) {\n return str.endsWith(pattern.substring(1));\n+ } else if (nextIndex == 1) {\n+ // Double wildcard \"**\" - skipping the first \"*\"\n+ return simpleMatch(pattern.substring(1), str);\n }\n String part = pattern.substring(1, nextIndex);\n int partIndex = str.indexOf(part);", "filename": "src/main/java/org/elasticsearch/common/regex/Regex.java", "status": "modified" }, { "diff": "@@ -25,25 +25,26 @@\n import java.util.regex.Pattern;\n \n import static org.hamcrest.Matchers.equalTo;\n+\n public class RegexTests extends ElasticsearchTestCase {\n \n @Test\n public void testFlags() {\n- String[] supportedFlags = new String[] { \"CASE_INSENSITIVE\", \"MULTILINE\", \"DOTALL\", \"UNICODE_CASE\", \"CANON_EQ\", \"UNIX_LINES\",\n- \"LITERAL\", \"COMMENTS\", \"UNICODE_CHAR_CLASS\" };\n- int[] flags = new int[] { Pattern.CASE_INSENSITIVE, Pattern.MULTILINE, Pattern.DOTALL, Pattern.UNICODE_CASE, Pattern.CANON_EQ,\n- Pattern.UNIX_LINES, Pattern.LITERAL, Pattern.COMMENTS, Regex.UNICODE_CHARACTER_CLASS };\n+ String[] supportedFlags = new String[]{\"CASE_INSENSITIVE\", \"MULTILINE\", \"DOTALL\", \"UNICODE_CASE\", \"CANON_EQ\", \"UNIX_LINES\",\n+ \"LITERAL\", \"COMMENTS\", \"UNICODE_CHAR_CLASS\"};\n+ int[] flags = new int[]{Pattern.CASE_INSENSITIVE, Pattern.MULTILINE, Pattern.DOTALL, Pattern.UNICODE_CASE, Pattern.CANON_EQ,\n+ Pattern.UNIX_LINES, Pattern.LITERAL, Pattern.COMMENTS, Regex.UNICODE_CHARACTER_CLASS};\n Random random = getRandom();\n int num = 10 + random.nextInt(100);\n for (int i = 0; i < num; i++) {\n- int numFlags = random.nextInt(flags.length+1);\n+ int numFlags = random.nextInt(flags.length + 1);\n int current = 0;\n StringBuilder builder = new StringBuilder();\n for (int j = 0; j < numFlags; j++) {\n int index = random.nextInt(flags.length);\n current |= flags[index];\n builder.append(supportedFlags[index]);\n- if (j < numFlags-1) {\n+ if (j < numFlags - 1) {\n builder.append(\"|\");\n }\n }\n@@ -53,4 +54,18 @@ public void testFlags() {\n Pattern.compile(\"\\\\w\\\\d{1,2}\", current); // accepts the flags?\n }\n }\n+\n+ @Test(timeout = 1000)\n+ public void testDoubleWildcardMatch() {\n+ assertTrue(Regex.simpleMatch(\"ddd\", \"ddd\"));\n+ assertTrue(Regex.simpleMatch(\"d*d*d\", \"dadd\"));\n+ assertTrue(Regex.simpleMatch(\"**ddd\", \"dddd\"));\n+ assertFalse(Regex.simpleMatch(\"**ddd\", \"fff\"));\n+ assertTrue(Regex.simpleMatch(\"fff*ddd\", \"fffabcddd\"));\n+ assertTrue(Regex.simpleMatch(\"fff**ddd\", \"fffabcddd\"));\n+ assertFalse(Regex.simpleMatch(\"fff**ddd\", \"fffabcdd\"));\n+ assertTrue(Regex.simpleMatch(\"fff*******ddd\", \"fffabcddd\"));\n+ assertFalse(Regex.simpleMatch(\"fff******ddd\", \"fffabcdd\"));\n+ }\n+\n }\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/common/regex/RegexTests.java", "status": "modified" } ] }
{ "body": "Closing all indices doesn't work when doing:\n\n```\ncurl -XPOST localhost:9200/*/_close\n```\n\nalthough it works using `_all` or using a wildcard expression like `index*`.\n", "comments": [], "number": 4564, "title": "Closing all indices doesn't work when using wildcard only" }
{ "body": "Named wildcards were not always replaced with the proper values by PathTrie.\nDelete index (`curl -XDELETE localhost:9200/*`) worked anyway, as the named wildcard is the last path element.\nWhen the named wildcard wasn't the last path element (e.g. `curl -XPOST localhost:29200/*/_close`), the variable didn't get replaced with the current '*' value, but with the empty string, which led to an error as an empty index is not allowed by open/close index.\n\nCloses #4564\n", "number": 4606, "review_comments": [], "title": "Fixed open/close index api when using wildcard only" }
{ "commits": [ { "message": "Fixed open/close index api when using wildcard only\n\nNamed wildcards were not always properly replaced with proper values by PathTrie.\nDelete index (curl -XDELETE localhost:9200/*) worked anyway as the named wildcard is the last path element (and even if {index} didn't get replaced with '*', the empty string would have mapped to all indices anyway). When the named wildcard wasn't the last path element (e.g. curl -XPOST localhost:29200/*/_close), the variable didn't get replaced with the current '*' value, but with the empty string, which leads to an error as empty index is not allowed by open/close index.\n\nCloses #4564" } ], "files": [ { "diff": "@@ -0,0 +1,82 @@\n+setup:\n+ - do:\n+ indices.create:\n+ index: test_index1\n+ - do:\n+ indices.create:\n+ index: test_index2\n+ - do:\n+ indices.create:\n+ index: test_index3\n+ - do:\n+ cluster.health:\n+ wait_for_status: yellow\n+\n+---\n+\"All indices\":\n+ - do:\n+ indices.close:\n+ index: _all\n+\n+ - do:\n+ catch: forbidden\n+ search:\n+ index: test_index2\n+\n+ - do:\n+ indices.open:\n+ index: _all\n+\n+ - do:\n+ cluster.health:\n+ wait_for_status: yellow\n+\n+ - do:\n+ search:\n+ index: test_index2\n+\n+---\n+\"Trailing wildcard\":\n+ - do:\n+ indices.close:\n+ index: test_*\n+\n+ - do:\n+ catch: forbidden\n+ search:\n+ index: test_index2\n+\n+ - do:\n+ indices.open:\n+ index: test_*\n+\n+ - do:\n+ cluster.health:\n+ wait_for_status: yellow\n+\n+ - do:\n+ search:\n+ index: test_index2\n+\n+---\n+\"Only wildcard\":\n+ - do:\n+ indices.close:\n+ index: '*'\n+\n+ - do:\n+ catch: forbidden\n+ search:\n+ index: test_index3\n+\n+ - do:\n+ indices.open:\n+ index: '*'\n+\n+ - do:\n+ cluster.health:\n+ wait_for_status: yellow\n+\n+ - do:\n+ search:\n+ index: test_index3\n\\ No newline at end of file", "filename": "rest-api-spec/test/indices.open/20_multiple_indices.yaml", "status": "added" }, { "diff": "@@ -157,42 +157,39 @@ public T retrieve(String[] path, int index, Map<String, String> params) {\n \n String token = path[index];\n TrieNode<T> node = children.get(token);\n- boolean usedWildcard = false;\n+ boolean usedWildcard;\n if (node == null) {\n node = children.get(wildcard);\n if (node == null) {\n return null;\n- } else {\n- usedWildcard = true;\n- if (params != null && node.isNamedWildcard()) {\n- put(params, node.namedWildcard(), token);\n- }\n }\n+ usedWildcard = true;\n+ } else {\n+ usedWildcard = token.equals(wildcard);\n }\n \n+ put(params, node, token);\n+\n if (index == (path.length - 1)) {\n- if (params != null && node.isNamedWildcard()) {\n- put(params, node.namedWildcard(), token);\n- }\n return node.value;\n }\n \n T res = node.retrieve(path, index + 1, params);\n if (res == null && !usedWildcard) {\n node = children.get(wildcard);\n if (node != null) {\n- if (params != null && node.isNamedWildcard()) {\n- put(params, node.namedWildcard(), token);\n- }\n+ put(params, node, token);\n res = node.retrieve(path, index + 1, params);\n }\n }\n \n return res;\n }\n \n- private void put(Map<String, String> params, String key, String value) {\n- params.put(key, decoder.decode(value));\n+ private void put(Map<String, String> params, TrieNode<T> node, String value) {\n+ if (params != null && node.isNamedWildcard()) {\n+ params.put(node.namedWildcard(), decoder.decode(value));\n+ }\n }\n }\n ", "filename": "src/main/java/org/elasticsearch/common/path/PathTrie.java", "status": "modified" }, { "diff": "@@ -25,7 +25,6 @@\n import java.util.Map;\n \n import static 
com.google.common.collect.Maps.newHashMap;\n-import static org.hamcrest.MatcherAssert.assertThat;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.nullValue;\n \n@@ -113,11 +112,32 @@ public void testPreferNonWildcardExecution() {\n }\n \n @Test\n- public void testEndWithNamedWildcardAndLookupWithWildcard() {\n+ public void testNamedWildcardAndLookupWithWildcard() {\n PathTrie<String> trie = new PathTrie<String>();\n trie.insert(\"x/{test}\", \"test1\");\n+ trie.insert(\"{test}/a\", \"test2\");\n+ trie.insert(\"/{test}\", \"test3\");\n+ trie.insert(\"/{test}/_endpoint\", \"test4\");\n+ trie.insert(\"/*/{test}/_endpoint\", \"test5\");\n+\n Map<String, String> params = newHashMap();\n assertThat(trie.retrieve(\"/x/*\", params), equalTo(\"test1\"));\n assertThat(params.get(\"test\"), equalTo(\"*\"));\n+\n+ params = newHashMap();\n+ assertThat(trie.retrieve(\"/b/a\", params), equalTo(\"test2\"));\n+ assertThat(params.get(\"test\"), equalTo(\"b\"));\n+\n+ params = newHashMap();\n+ assertThat(trie.retrieve(\"/*\", params), equalTo(\"test3\"));\n+ assertThat(params.get(\"test\"), equalTo(\"*\"));\n+\n+ params = newHashMap();\n+ assertThat(trie.retrieve(\"/*/_endpoint\", params), equalTo(\"test4\"));\n+ assertThat(params.get(\"test\"), equalTo(\"*\"));\n+\n+ params = newHashMap();\n+ assertThat(trie.retrieve(\"a/*/_endpoint\", params), equalTo(\"test5\"));\n+ assertThat(params.get(\"test\"), equalTo(\"*\"));\n }\n }", "filename": "src/test/java/org/elasticsearch/common/path/PathTrieTests.java", "status": "modified" } ] }
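The fix is easiest to see with a trie shaped like the open/close route; a minimal sketch using `PathTrie` the way the added `PathTrieTests` cases do (the route pattern `/{index}/_close` is illustrative here, not the actual REST registration):

```java
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.path.PathTrie;

public class WildcardRouteCheck {
    public static void main(String[] args) {
        PathTrie<String> trie = new PathTrie<String>();
        // a route whose named wildcard is not the last path element
        trie.insert("/{index}/_close", "close-handler");

        Map<String, String> params = new HashMap<String, String>();
        String handler = trie.retrieve("/*/_close", params);

        System.out.println(handler);              // close-handler
        // with the fix the named wildcard resolves to "*" instead of an empty string
        System.out.println(params.get("index"));  // *
    }
}
```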
{ "body": "im getting error below after killing one node in the cluster \n(exception is thrown on remaining nodes)\n\n```\norg.elasticsearch.common.util.concurrent.UncategorizedExecutionException: Failed execution\n at org.elasticsearch.action.support.AdapterActionFuture.rethrowExecutionException(AdapterActionFuture.java:90)\n at org.elasticsearch.action.support.AdapterActionFuture.actionGet(AdapterActionFuture.java:49)\n at org.elasticsearch.action.ActionRequestBuilder.get(ActionRequestBuilder.java:67)\n ...\nCaused by: java.util.concurrent.ExecutionException: java.lang.NullPointerException\n at org.elasticsearch.common.util.concurrent.BaseFuture$Sync.getValue(BaseFuture.java:288)\n at org.elasticsearch.common.util.concurrent.BaseFuture$Sync.get(BaseFuture.java:275)\n at org.elasticsearch.common.util.concurrent.BaseFuture.get(BaseFuture.java:113)\n at org.elasticsearch.action.support.AdapterActionFuture.actionGet(AdapterActionFuture.java:45)\n ... 15 more\nCaused by: java.lang.NullPointerException\n at org.elasticsearch.cluster.routing.IndexShardRoutingTable.getActiveAttribute(IndexShardRoutingTable.java:441)\n at org.elasticsearch.cluster.routing.IndexShardRoutingTable.preferAttributesActiveInitializingShardsIt(IndexShardRoutingTable.java:488)\n at org.elasticsearch.cluster.routing.IndexShardRoutingTable.preferAttributesActiveInitializingShardsIt(IndexShardRoutingTable.java:483)\n at org.elasticsearch.cluster.routing.operation.plain.PlainOperationRouting.preferenceActiveShardIterator(PlainOperationRouting.java:169)\n at org.elasticsearch.cluster.routing.operation.plain.PlainOperationRouting.getShards(PlainOperationRouting.java:80)\n at org.elasticsearch.action.get.TransportGetAction.shards(TransportGetAction.java:80)\n at org.elasticsearch.action.get.TransportGetAction.shards(TransportGetAction.java:42)\n at org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction$AsyncSingleAction.<init>(TransportShardSingleOperationAction.java:121)\n at org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction$AsyncSingleAction.<init>(TransportShardSingleOperationAction.java:97)\n at org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction.doExecute(TransportShardSingleOperationAction.java:74)\n at org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction.doExecute(TransportShardSingleOperationAction.java:49)\n at org.elasticsearch.action.support.TransportAction.execute(TransportAction.java:63)\n at org.elasticsearch.client.node.NodeClient.execute(NodeClient.java:92)\n at org.elasticsearch.client.support.AbstractClient.get(AbstractClient.java:179)\n at org.elasticsearch.action.get.GetRequestBuilder.doExecute(GetRequestBuilder.java:112)\n at org.elasticsearch.action.ActionRequestBuilder.execute(ActionRequestBuilder.java:85)\n at org.elasticsearch.action.ActionRequestBuilder.execute(ActionRequestBuilder.java:59)\n\n```\n\ncontext:\n- version: 0.90.9\n- 3 node cluster\n- 2 replicas\n- 10 shards per index\n", "comments": [ { "body": "thanks for reporting this! I will push a fix soonish\n", "created_at": "2014-01-03T13:00:29Z" } ], "number": 4589, "title": "NullPointerException in IndexShardRoutingTable.getActiveAttribute" }
{ "body": "The node we need to look up for attribute collection might not be part\nof the `DiscoveryNodes` anymore due to node failure or shutdown. This\ncommit adds a check and removes the shard from the iteration.\n\nCloses #4589\n", "number": 4598, "review_comments": [], "title": "Check if node is still present when collecting attribute shard routings" }
{ "commits": [ { "message": "Check if node is still present when collecting attribute shard routings\n\nThe node we need to lookup for attribute colelction might not be part\nof the `DiscoveryNodes` anymore due to node failure or shutdown. This\ncommit adds a check and removes the shard from the iteration.\n\nCloses #4589" } ], "files": [ { "diff": "@@ -23,6 +23,7 @@\n import com.google.common.collect.ImmutableMap;\n import com.google.common.collect.UnmodifiableIterator;\n import jsr166y.ThreadLocalRandom;\n+import org.elasticsearch.cluster.node.DiscoveryNode;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.common.collect.MapBuilder;\n import org.elasticsearch.common.io.stream.StreamInput;\n@@ -430,22 +431,9 @@ private AttributesRoutings getActiveAttribute(AttributesKey key, DiscoveryNodes\n if (shardRoutings == null) {\n synchronized (shardsByAttributeMutex) {\n ArrayList<ShardRouting> from = new ArrayList<ShardRouting>(activeShards);\n- ArrayList<ShardRouting> to = new ArrayList<ShardRouting>();\n- for (String attribute : key.attributes) {\n- String localAttributeValue = nodes.localNode().attributes().get(attribute);\n- if (localAttributeValue == null) {\n- continue;\n- }\n- for (Iterator<ShardRouting> iterator = from.iterator(); iterator.hasNext(); ) {\n- ShardRouting fromShard = iterator.next();\n- if (localAttributeValue.equals(nodes.get(fromShard.currentNodeId()).attributes().get(attribute))) {\n- iterator.remove();\n- to.add(fromShard);\n- }\n- }\n- }\n+ ImmutableList<ShardRouting> to = collectAttributeShards(key, nodes, from);\n \n- shardRoutings = new AttributesRoutings(ImmutableList.copyOf(to), ImmutableList.copyOf(from));\n+ shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from));\n activeShardsByAttributes = MapBuilder.newMapBuilder(activeShardsByAttributes).put(key, shardRoutings).immutableMap();\n }\n }\n@@ -457,28 +445,34 @@ private AttributesRoutings getInitializingAttribute(AttributesKey key, Discovery\n if (shardRoutings == null) {\n synchronized (shardsByAttributeMutex) {\n ArrayList<ShardRouting> from = new ArrayList<ShardRouting>(allInitializingShards);\n- ArrayList<ShardRouting> to = new ArrayList<ShardRouting>();\n- for (String attribute : key.attributes) {\n- String localAttributeValue = nodes.localNode().attributes().get(attribute);\n- if (localAttributeValue == null) {\n- continue;\n- }\n- for (Iterator<ShardRouting> iterator = from.iterator(); iterator.hasNext(); ) {\n- ShardRouting fromShard = iterator.next();\n- if (localAttributeValue.equals(nodes.get(fromShard.currentNodeId()).attributes().get(attribute))) {\n- iterator.remove();\n- to.add(fromShard);\n- }\n- }\n- }\n-\n- shardRoutings = new AttributesRoutings(ImmutableList.copyOf(to), ImmutableList.copyOf(from));\n+ ImmutableList<ShardRouting> to = collectAttributeShards(key, nodes, from);\n+ shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from));\n initializingShardsByAttributes = MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap();\n }\n }\n return shardRoutings;\n }\n \n+ private static ImmutableList<ShardRouting> collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList<ShardRouting> from) {\n+ final ArrayList<ShardRouting> to = new ArrayList<ShardRouting>();\n+ for (final String attribute : key.attributes) {\n+ final String localAttributeValue = nodes.localNode().attributes().get(attribute);\n+ if (localAttributeValue != null) {\n+ for (Iterator<ShardRouting> iterator = 
from.iterator(); iterator.hasNext(); ) {\n+ ShardRouting fromShard = iterator.next();\n+ final DiscoveryNode discoveryNode = nodes.get(fromShard.currentNodeId());\n+ if (discoveryNode == null) {\n+ iterator.remove(); // node is not present anymore - ignore shard\n+ } else if (localAttributeValue.equals(discoveryNode.attributes().get(attribute))) {\n+ iterator.remove();\n+ to.add(fromShard);\n+ }\n+ }\n+ }\n+ }\n+ return ImmutableList.copyOf(to);\n+ }\n+\n public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes) {\n return preferAttributesActiveInitializingShardsIt(attributes, nodes, pickIndex());\n }", "filename": "src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java", "status": "modified" } ] }
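The guard itself is small; a self-contained sketch of the same pattern, using simplified stand-ins (`Node`, `ShardEntry`) rather than the real `DiscoveryNode`/`ShardRouting` classes:

```java
import java.util.*;

public class AttributeShardCollector {
    static final class Node {
        final Map<String, String> attributes;
        Node(Map<String, String> attributes) { this.attributes = attributes; }
    }

    static final class ShardEntry {
        final String currentNodeId;
        ShardEntry(String currentNodeId) { this.currentNodeId = currentNodeId; }
    }

    // mirrors collectAttributeShards: shards on departed nodes are dropped instead of causing an NPE
    static List<ShardEntry> collect(String attribute, String localValue,
                                    Map<String, Node> nodes, List<ShardEntry> from) {
        List<ShardEntry> to = new ArrayList<ShardEntry>();
        for (Iterator<ShardEntry> it = from.iterator(); it.hasNext(); ) {
            ShardEntry shard = it.next();
            Node node = nodes.get(shard.currentNodeId);
            if (node == null) {
                it.remove(); // node is not present anymore - ignore shard
            } else if (localValue.equals(node.attributes.get(attribute))) {
                it.remove();
                to.add(shard);
            }
        }
        return to;
    }

    public static void main(String[] args) {
        Map<String, Node> nodes = new HashMap<String, Node>();
        nodes.put("node1", new Node(Collections.singletonMap("rack", "r1")));
        // "node2" was killed and is no longer part of the cluster state
        List<ShardEntry> from = new ArrayList<ShardEntry>(
                Arrays.asList(new ShardEntry("node1"), new ShardEntry("node2")));
        List<ShardEntry> preferred = collect("rack", "r1", nodes, from);
        System.out.println(preferred.size()); // 1 - the shard on the departed node is skipped
    }
}
```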
{ "body": "I've been having some issues with the following sequence of queries:\n\n```\ncurl -XDELETE 'http://localhost:9201/test__garments'\ncurl -XPOST 'http://localhost:9201/test__garments/garment/1' -d '{\"id\":1, \"name\":\"Some Garment\"}'\ncurl -XPOST 'http://localhost:9201/test__garments/_refresh'\ncurl -XPUT 'http://localhost:9201/test__garments/verdict/_mapping' -d '{\"verdict\":{\"_parent\":{\"type\":\"garment\"},\"properties\":{\"id\":{\"type\":\"integer\"}}}}'\ncurl -XPOST 'http://localhost:9201/test__garments/verdict/1?parent=1' -d '{\"id\":1}'\n\ncurl -XPOST 'http://localhost:9201/test__garments/_refresh'\ncurl -XPOST 'http://localhost:9201/test__garments/verdict/_search' -d '\n{\n \"query\": {\n \"filtered\": {\n \"query\": {\n \"match_all\": {}\n },\n \"filter\": {\n \"has_parent\": {\n \"type\": \"garment\",\n \"query\": {\n \"match_all\": {}\n }\n }\n }\n }\n }\n}\n '\n```\n\nIt should produce 1 result (I'm indexing a document (garment), adding a child document (verdict) and then querying for child documents that have a parent), but it produces no results (and continues to do so no matter how long I wait). If I remove the first refresh it seems ok or if I do\n\n```\ncurl -XPOST 'http://localhost:9200/_cache/clear?id_cache=true'\n```\n\nbefore the search then I will also get a result (as discussed at https://groups.google.com/forum/#!topic/elasticsearch/oD8EKEYeZuM). I noticed this behavior when moving from 0.20.6 to 0.90.9\n\nIdeally I would not have to expire the cache by hand - or the circumstances in which one needs to do this should be documented\n", "comments": [ { "body": "@fcheung I can confirm this bug. The reason why this occurs b/c in `0.90.9` ES only loads the ids of parent documents, whereas in `0.20.6` the ids of all documents are loaded into memory. The way ES decides if a doc is a parent doc, is based on what has been defined in the mapping. At the time the first refresh occurs there is no `_parent` mapping defined and no ids are loaded into memory, but when the second refresh occurs there is a parent mapping. However the first document doesn't get included in the id cache, because the segment that document is in has already been processed. ES needs to invalidate that particular cache (on the segment level) if a new parent type has been added.\n", "created_at": "2013-12-30T21:09:14Z" }, { "body": "Thanks, that's very helpful information - enough for me to upgrade to 0.90 with confidence.\n", "created_at": "2013-12-31T13:04:08Z" }, { "body": "@fcheung I pushed a fix for this in master and the fix will be part of the next 1.0-RC1 release. I didn't backport this change since the fix relies on some infrastructure in the mapping that isn't available in the 0.90 branch.\n", "created_at": "2014-01-03T15:40:30Z" }, { "body": "Awesome - thanks for the quick turnaround!\n", "created_at": "2014-01-03T15:58:14Z" }, { "body": "Fixed via https://github.com/elasticsearch/elasticsearch/commit/38f038f899f2b321a4d691343514b0392411bbbc\n", "created_at": "2014-01-06T09:48:54Z" } ], "number": 4568, "title": "Odd interaction between refresh and parent/child queries " }
{ "body": "An already loaded SimpleIdReaderCache should be reloaded when a new `_parent` field has been introduced.\n\nRelates #4568\n", "number": 4595, "review_comments": [], "title": "Refresh the id_cache if a new child type with a _parent field has been introduced" }
{ "commits": [ { "message": "Already loaded SimpleIdReaderCache should be reloaded when a new `_parent` has been introduced.\n\nRelates #4568" } ], "files": [ { "diff": "@@ -34,6 +34,8 @@\n import org.elasticsearch.index.Index;\n import org.elasticsearch.index.cache.id.IdCache;\n import org.elasticsearch.index.cache.id.IdReaderCache;\n+import org.elasticsearch.index.mapper.DocumentMapper;\n+import org.elasticsearch.index.mapper.DocumentTypeListener;\n import org.elasticsearch.index.mapper.Uid;\n import org.elasticsearch.index.mapper.internal.ParentFieldMapper;\n import org.elasticsearch.index.mapper.internal.UidFieldMapper;\n@@ -45,31 +47,36 @@\n import java.io.IOException;\n import java.util.*;\n import java.util.concurrent.ConcurrentMap;\n+import java.util.concurrent.atomic.AtomicReference;\n \n /**\n *\n */\n-public class SimpleIdCache extends AbstractIndexComponent implements IdCache, SegmentReader.CoreClosedListener {\n+public class SimpleIdCache extends AbstractIndexComponent implements IdCache, SegmentReader.CoreClosedListener, DocumentTypeListener {\n \n- private final ConcurrentMap<Object, SimpleIdReaderCache> idReaders;\n private final boolean reuse;\n+ private final ConcurrentMap<Object, SimpleIdReaderCache> idReaders;\n+ private final AtomicReference<NavigableSet<HashedBytesArray>> parentTypesHolder;\n \n IndexService indexService;\n \n @Inject\n public SimpleIdCache(Index index, @IndexSettings Settings indexSettings) {\n super(index, indexSettings);\n+ reuse = componentSettings.getAsBoolean(\"reuse\", false);\n idReaders = ConcurrentCollections.newConcurrentMap();\n- this.reuse = componentSettings.getAsBoolean(\"reuse\", false);\n+ parentTypesHolder = new AtomicReference<NavigableSet<HashedBytesArray>>(new TreeSet<HashedBytesArray>(UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder));\n }\n \n @Override\n public void setIndexService(IndexService indexService) {\n this.indexService = indexService;\n+ indexService.mapperService().addTypeListener(this);\n }\n \n @Override\n public void close() throws ElasticSearchException {\n+ indexService.mapperService().removeTypeListener(this);\n clear();\n }\n \n@@ -117,21 +124,12 @@ public void refresh(List<AtomicReaderContext> atomicReaderContexts) throws IOExc\n // do the refresh\n Map<Object, Map<String, TypeBuilder>> builders = new HashMap<Object, Map<String, TypeBuilder>>();\n Map<Object, IndexReader> cacheToReader = new HashMap<Object, IndexReader>();\n-\n- // We don't want to load uid of child documents, this allows us to not load uids of child types.\n- NavigableSet<HashedBytesArray> parentTypes = new TreeSet<HashedBytesArray>(UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder);\n- BytesRef spare = new BytesRef();\n- for (String type : indexService.mapperService().types()) {\n- ParentFieldMapper parentFieldMapper = indexService.mapperService().documentMapper(type).parentFieldMapper();\n- if (parentFieldMapper.active()) {\n- parentTypes.add(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), spare)));\n- }\n- }\n+ NavigableSet<HashedBytesArray> parentTypes = this.parentTypesHolder.get();\n \n // first, go over and load all the id->doc map for all types\n for (AtomicReaderContext context : atomicReaderContexts) {\n AtomicReader reader = context.reader();\n- if (idReaders.containsKey(reader.getCoreCacheKey())) {\n+ if (!refreshNeeded(context)) {\n // no need, continue\n continue;\n }\n@@ -150,6 +148,7 @@ public void refresh(List<AtomicReaderContext> atomicReaderContexts) throws IOExc\n DocsEnum docsEnum = 
null;\n uid: for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {\n HashedBytesArray[] typeAndId = Uid.splitUidIntoTypeAndId(term);\n+ // We don't want to load uid of child documents, this allows us to not load uids of child types.\n if (!parentTypes.contains(typeAndId[0])) {\n do {\n HashedBytesArray nextParent = parentTypes.ceiling(typeAndId[0]);\n@@ -189,10 +188,9 @@ public void refresh(List<AtomicReaderContext> atomicReaderContexts) throws IOExc\n }\n \n // now, go and load the docId->parentId map\n-\n for (AtomicReaderContext context : atomicReaderContexts) {\n AtomicReader reader = context.reader();\n- if (idReaders.containsKey(reader.getCoreCacheKey())) {\n+ if (!refreshNeeded(context)) {\n // no need, continue\n continue;\n }\n@@ -245,7 +243,10 @@ public void refresh(List<AtomicReaderContext> atomicReaderContexts) throws IOExc\n }\n IndexReader indexReader = cacheToReader.get(readerKey);\n SimpleIdReaderCache readerCache = new SimpleIdReaderCache(types.immutableMap(), ShardUtils.extractShardId(indexReader));\n- idReaders.put(readerKey, readerCache);\n+ SimpleIdReaderCache oldReaderCache = idReaders.put(readerKey, readerCache);\n+ if (oldReaderCache != null) {\n+ onRemoval(oldReaderCache);\n+ }\n onCached(readerCache);\n }\n }\n@@ -295,13 +296,45 @@ private HashedBytesArray checkIfCanReuse(Map<Object, Map<String, TypeBuilder>> b\n \n private boolean refreshNeeded(List<AtomicReaderContext> atomicReaderContexts) {\n for (AtomicReaderContext atomicReaderContext : atomicReaderContexts) {\n- if (!idReaders.containsKey(atomicReaderContext.reader().getCoreCacheKey())) {\n+ if (refreshNeeded(atomicReaderContext)) {\n return true;\n }\n }\n return false;\n }\n \n+ private boolean refreshNeeded(AtomicReaderContext atomicReaderContext) {\n+ SimpleIdReaderCache key = idReaders.get(atomicReaderContext.reader().getCoreCacheKey());\n+ // if key.reload == true, then we trash the SimpleIdReaderCache, while we could reload it in a smart manner.\n+ return key == null || key.reload();\n+ }\n+\n+ @Override\n+ public void beforeCreate(DocumentMapper mapper) {\n+ NavigableSet<HashedBytesArray> parentTypes = parentTypesHolder.get();\n+ ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();\n+ if (parentFieldMapper.active()) {\n+ // A _parent field can never be added to an existing mapping, so a _parent field either exists on\n+ // a new created or doesn't exists. 
This is why we can update the known parent types via DocumentTypeListener\n+ if (parentTypes.add(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), new BytesRef())))) {\n+ parentTypesHolder.set(parentTypes);\n+ for (SimpleIdReaderCache readerCache : idReaders.values()) {\n+ readerCache.reload(true);\n+ }\n+ }\n+ }\n+ }\n+\n+ @Override\n+ public void afterRemove(DocumentMapper mapper) {\n+ NavigableSet<HashedBytesArray> parentTypes = parentTypesHolder.get();\n+ ParentFieldMapper parentFieldMapper = mapper.parentFieldMapper();\n+ if (parentFieldMapper.active()) {\n+ parentTypes.remove(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), new BytesRef())));\n+ parentTypesHolder.set(parentTypes);\n+ }\n+ }\n+\n static class TypeBuilder {\n final ObjectIntOpenHashMap<HashedBytesArray> idToDoc = new ObjectIntOpenHashMap<HashedBytesArray>();\n final HashedBytesArray[] docToId;", "filename": "src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdCache.java", "status": "modified" }, { "diff": "@@ -32,6 +32,7 @@\n public class SimpleIdReaderCache implements IdReaderCache {\n \n private final ImmutableMap<String, SimpleIdReaderTypeCache> types;\n+ private volatile boolean reload;\n \n @Nullable\n public final ShardId shardId;\n@@ -64,6 +65,14 @@ public int docById(String type, HashedBytesArray id) {\n return -1;\n }\n \n+ public boolean reload() {\n+ return reload;\n+ }\n+\n+ public void reload(boolean reload) {\n+ this.reload = reload;\n+ }\n+\n public long sizeInBytes() {\n long sizeInBytes = 0;\n for (SimpleIdReaderTypeCache readerTypeCache : types.values()) {", "filename": "src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdReaderCache.java", "status": "modified" }, { "diff": "@@ -26,15 +26,15 @@ public interface DocumentTypeListener {\n /**\n * Invoked just before a new document type has been created.\n *\n- * @param type The new document type\n+ * @param mapper The new document mapper of the type being added\n */\n- void beforeCreate(String type);\n+ void beforeCreate(DocumentMapper mapper);\n \n /**\n * Invoked just after an existing document type has been removed.\n *\n- * @param type The existing document type\n+ * @param mapper The existing document mapper of the type being removed\n */\n- void afterRemove(String type);\n+ void afterRemove(DocumentMapper mapper);\n \n }", "filename": "src/main/java/org/elasticsearch/index/mapper/DocumentTypeListener.java", "status": "modified" }, { "diff": "@@ -295,7 +295,7 @@ private DocumentMapper merge(DocumentMapper mapper) {\n mapper.addObjectMapperListener(objectMapperListener, false);\n \n for (DocumentTypeListener typeListener : typeListeners) {\n- typeListener.beforeCreate(mapper.type());\n+ typeListener.beforeCreate(mapper);\n }\n mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();\n return mapper;\n@@ -339,7 +339,7 @@ public void remove(String type) {\n mappers = newMapBuilder(mappers).remove(type).map();\n removeObjectAndFieldMappers(docMapper);\n for (DocumentTypeListener typeListener : typeListeners) {\n- typeListener.afterRemove(type);\n+ typeListener.afterRemove(docMapper);\n }\n }\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/MapperService.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n import org.elasticsearch.index.fielddata.IndexFieldDataService;\n import org.elasticsearch.index.indexing.IndexingOperationListener;\n import org.elasticsearch.index.indexing.ShardIndexingService;\n+import org.elasticsearch.index.mapper.DocumentMapper;\n import 
org.elasticsearch.index.mapper.DocumentTypeListener;\n import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.mapper.internal.TypeFieldMapper;\n@@ -191,15 +192,15 @@ private Query parseQuery(String type, BytesReference querySource, XContentParser\n private class PercolateTypeListener implements DocumentTypeListener {\n \n @Override\n- public void beforeCreate(String type) {\n- if (PercolatorService.TYPE_NAME.equals(type)) {\n+ public void beforeCreate(DocumentMapper mapper) {\n+ if (PercolatorService.TYPE_NAME.equals(mapper.type())) {\n enableRealTimePercolator();\n }\n }\n \n @Override\n- public void afterRemove(String type) {\n- if (PercolatorService.TYPE_NAME.equals(type)) {\n+ public void afterRemove(DocumentMapper mapper) {\n+ if (PercolatorService.TYPE_NAME.equals(mapper.type())) {\n disableRealTimePercolator();\n clear();\n }", "filename": "src/main/java/org/elasticsearch/index/percolator/PercolatorQueriesRegistry.java", "status": "modified" }, { "diff": "@@ -262,6 +262,7 @@ private SimpleIdCache createSimpleIdCache(Tuple<String, String>... documentTypes\n Index index = new Index(\"test\");\n SimpleIdCache idCache = new SimpleIdCache(index, settings);\n MapperService mapperService = MapperTestUtils.newMapperService();\n+ idCache.setIndexService(new StubIndexService(mapperService));\n \n for (Tuple<String, String> documentType : documentTypes) {\n String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(documentType.v1())\n@@ -270,7 +271,6 @@ private SimpleIdCache createSimpleIdCache(Tuple<String, String>... documentTypes\n mapperService.merge(documentType.v1(), new CompressedString(defaultMapping), true);\n }\n \n- idCache.setIndexService(new StubIndexService(mapperService));\n return idCache;\n }\n ", "filename": "src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java", "status": "modified" }, { "diff": "@@ -328,11 +328,12 @@ static SearchContext createSearchContext(String indexName, String parentType, St\n final CacheRecycler cacheRecycler = new CacheRecycler(ImmutableSettings.EMPTY);\n Settings settings = ImmutableSettings.EMPTY;\n MapperService mapperService = MapperTestUtils.newMapperService(index, settings);\n+ final IndexService indexService = new SimpleIdCacheTests.StubIndexService(mapperService);\n+ idCache.setIndexService(indexService);\n+ // Id_cache is now registered as document type listener, so we can add mappings.\n mapperService.merge(\n childType, new CompressedString(PutMappingRequest.buildFromSimplifiedDef(childType, \"_parent\", \"type=\" + parentType).string()), true\n );\n- final IndexService indexService = new SimpleIdCacheTests.StubIndexService(mapperService);\n- idCache.setIndexService(indexService);\n \n ThreadPool threadPool = new ThreadPool();\n NodeSettingsService nodeSettingsService = new NodeSettingsService(settings);", "filename": "src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.elasticsearch.action.count.CountResponse;\n import org.elasticsearch.action.explain.ExplainResponse;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.search.SearchType;\n import org.elasticsearch.action.search.ShardSearchFailure;\n@@ -36,6 +37,7 @@\n import org.elasticsearch.index.mapper.MergeMappingException;\n import 
org.elasticsearch.index.query.*;\n import org.elasticsearch.index.search.child.ScoreType;\n+import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.search.facet.terms.TermsFacet;\n import org.elasticsearch.search.sort.SortBuilders;\n import org.elasticsearch.search.sort.SortOrder;\n@@ -54,8 +56,7 @@\n import static org.elasticsearch.index.query.QueryBuilders.*;\n import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;\n import static org.elasticsearch.search.facet.FacetBuilders.termsFacet;\n-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;\n import static org.hamcrest.Matchers.*;\n \n /**\n@@ -2052,6 +2053,124 @@ public void testNamedFilters() throws Exception {\n assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo(\"test\"));\n }\n \n+ @Test\n+ public void testParentChildQueriesNoParentType() throws Exception {\n+ client().admin().indices().prepareCreate(\"test\")\n+ .setSettings(ImmutableSettings.settingsBuilder()\n+ .put(\"index.number_of_shards\", 1)\n+ .put(\"index.refresh_interval\", -1)\n+ .put(\"index.number_of_replicas\", 0))\n+ .execute().actionGet();\n+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n+\n+ String parentId = \"p1\";\n+ client().prepareIndex(\"test\", \"parent\", parentId).setSource(\"p_field\", \"1\").execute().actionGet();\n+ client().admin().indices().prepareRefresh().get();\n+\n+ try {\n+ client().prepareSearch(\"test\")\n+ .setQuery(hasChildQuery(\"child\", termQuery(\"c_field\", \"1\")))\n+ .execute().actionGet();\n+ fail();\n+ } catch (SearchPhaseExecutionException e) {\n+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));\n+ }\n+\n+ try {\n+ client().prepareSearch(\"test\")\n+ .setQuery(hasChildQuery(\"child\", termQuery(\"c_field\", \"1\")).scoreType(\"max\"))\n+ .execute().actionGet();\n+ fail();\n+ } catch (SearchPhaseExecutionException e) {\n+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));\n+ }\n+\n+ try {\n+ client().prepareSearch(\"test\")\n+ .setPostFilter(hasChildFilter(\"child\", termQuery(\"c_field\", \"1\")))\n+ .execute().actionGet();\n+ fail();\n+ } catch (SearchPhaseExecutionException e) {\n+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));\n+ }\n+\n+ try {\n+ client().prepareSearch(\"test\")\n+ .setQuery(topChildrenQuery(\"child\", termQuery(\"c_field\", \"1\")).score(\"max\"))\n+ .execute().actionGet();\n+ fail();\n+ } catch (SearchPhaseExecutionException e) {\n+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));\n+ }\n+\n+ // can't fail, because there is no check, this b/c parent type can be refered by many child types.\n+ client().prepareSearch(\"test\")\n+ .setQuery(hasParentQuery(\"parent\", termQuery(\"p_field\", \"1\")).scoreType(\"score\"))\n+ .execute().actionGet();\n+ client().prepareSearch(\"test\")\n+ .setPostFilter(hasParentFilter(\"parent\", termQuery(\"p_field\", \"1\")))\n+ .execute().actionGet();\n+ }\n+\n+ @Test\n+ public void testAdd_ParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc() throws Exception {\n+ client().admin().indices().prepareCreate(\"test\")\n+ .setSettings(ImmutableSettings.settingsBuilder()\n+ .put(\"index.number_of_shards\", 1)\n+ .put(\"index.refresh_interval\", -1)\n+ .put(\"index.number_of_replicas\", 0))\n+ 
.execute().actionGet();\n+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n+\n+ String parentId = \"p1\";\n+ client().prepareIndex(\"test\", \"parent\", parentId).setSource(\"p_field\", \"1\").execute().actionGet();\n+ client().admin().indices().prepareRefresh().get();\n+ assertAcked(client().admin()\n+ .indices()\n+ .preparePutMapping(\"test\")\n+ .setType(\"child\")\n+ .setSource(\"_parent\", \"type=parent\"));\n+ client().prepareIndex(\"test\", \"child\", \"c1\").setSource(\"c_field\", \"1\").setParent(parentId).execute().actionGet();\n+ client().admin().indices().prepareRefresh().get();\n+\n+ SearchResponse searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(hasChildQuery(\"child\", termQuery(\"c_field\", \"1\")))\n+ .execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ assertSearchHits(searchResponse, parentId);\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(hasChildQuery(\"child\", termQuery(\"c_field\", \"1\")).scoreType(\"max\"))\n+ .execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ assertSearchHits(searchResponse, parentId);\n+\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setPostFilter(hasChildFilter(\"child\", termQuery(\"c_field\", \"1\")))\n+ .execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ assertSearchHits(searchResponse, parentId);\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(topChildrenQuery(\"child\", termQuery(\"c_field\", \"1\")).score(\"max\"))\n+ .execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ assertSearchHits(searchResponse, parentId);\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setPostFilter(hasParentFilter(\"parent\", termQuery(\"p_field\", \"1\")))\n+ .execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ assertSearchHits(searchResponse, \"c1\");\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(hasParentQuery(\"parent\", termQuery(\"p_field\", \"1\")).scoreType(\"score\"))\n+ .execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ assertSearchHits(searchResponse, \"c1\");\n+ }\n+\n private static HasChildFilterBuilder hasChildFilter(String type, QueryBuilder queryBuilder) {\n HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, queryBuilder);\n hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));", "filename": "src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java", "status": "modified" } ] }
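The same scenario as the curl reproduction, expressed against the Java client the way the new `testAdd_ParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc` does it; a sketch that assumes a `Client` connected to a running cluster and an existing `test` index:

```java
import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

public class ParentAddedAfterRefresh {
    static void reproduce(Client client) {
        // index the parent and refresh before any _parent mapping exists
        client.prepareIndex("test", "parent", "p1").setSource("p_field", "1").get();
        client.admin().indices().prepareRefresh().get();

        // only now add the child type with its _parent mapping, then index a child
        client.admin().indices().preparePutMapping("test")
                .setType("child").setSource("_parent", "type=parent").get();
        client.prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent("p1").get();
        client.admin().indices().prepareRefresh().get();

        SearchResponse response = client.prepareSearch("test")
                .setQuery(hasChildQuery("child", termQuery("c_field", "1"))).get();
        // with the id_cache invalidation from this PR the parent is found
        System.out.println(response.getHits().getTotalHits()); // 1
    }
}
```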
{ "body": "Today, if a specific feature is disabled for term vectors with something\nlike `\"store_term_vector_positions\": false`, term vectors might be disabled\naltogether even if `\"store_term_vectors\": true` is set in the mapping. This depends on the\norder of the values in the mapping, since the more specific option might override\nthe less specific one.\n", "comments": [], "number": 4582, "title": "Term Vector settings should be treated like flags without propagation" }
{ "body": "See #4579 #4581 #4582\n", "number": 4583, "review_comments": [], "title": "Fix toXContent in the All Mapper" }
{ "commits": [ { "message": "Term Vector settings should be treated like flags without propergation\n\ntoday if a specific feature is disabled for term vectors with something\nlike 'store_term_vector_positions = false' term vectors might be disabeled\nalltogether even if 'store_term_vectors=true' in the mapping. This depends on the\norder of the values in the mapping since the more specific one might override\nthe less specific on.\n\nCloses #4582" }, { "message": "Simulate the entire toXContent instead of special caseing\n\nToday we try to detect if we need to generate the mapping or not in\nthe all mapper. This is error prone since it misses conditions if not\nexplicitly added. We should rather similate the generation instead.\n\nThis commit also adds a random test to check if the settings\nof the all field mapper are correctly applied.\n\nCloses #4579\nCloses #4581" } ], "files": [ { "diff": "@@ -117,24 +117,32 @@ public T docValues(boolean docValues) {\n }\n \n public T storeTermVectors(boolean termVectors) {\n- this.fieldType.setStoreTermVectors(termVectors);\n+ if (termVectors) {\n+ this.fieldType.setStoreTermVectors(termVectors);\n+ } // don't set it to false, it is default and might be flipped by a more specific option\n return builder;\n }\n \n public T storeTermVectorOffsets(boolean termVectorOffsets) {\n- this.fieldType.setStoreTermVectors(termVectorOffsets);\n+ if (termVectorOffsets) {\n+ this.fieldType.setStoreTermVectors(termVectorOffsets);\n+ }\n this.fieldType.setStoreTermVectorOffsets(termVectorOffsets);\n return builder;\n }\n \n public T storeTermVectorPositions(boolean termVectorPositions) {\n- this.fieldType.setStoreTermVectors(termVectorPositions);\n+ if (termVectorPositions) {\n+ this.fieldType.setStoreTermVectors(termVectorPositions);\n+ }\n this.fieldType.setStoreTermVectorPositions(termVectorPositions);\n return builder;\n }\n \n public T storeTermVectorPayloads(boolean termVectorPayloads) {\n- this.fieldType.setStoreTermVectors(termVectorPayloads);\n+ if (termVectorPayloads) {\n+ this.fieldType.setStoreTermVectors(termVectorPayloads);\n+ }\n this.fieldType.setStoreTermVectorPayloads(termVectorPayloads);\n return builder;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@\n import org.apache.lucene.search.TermQuery;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.common.lucene.all.AllField;\n import org.elasticsearch.common.lucene.all.AllTermQuery;\n@@ -256,16 +257,26 @@ protected String contentType() {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- // if all are defaults, no need to write it at all\n boolean includeDefaults = params.paramAsBoolean(\"include_defaults\", false);\n-\n- if (!includeDefaults && enabled == Defaults.ENABLED && fieldType.stored() == Defaults.FIELD_TYPE.stored() &&\n- fieldType.storeTermVectors() == Defaults.FIELD_TYPE.storeTermVectors() &&\n- indexAnalyzer == null && searchAnalyzer == null && customFieldDataSettings == null\n- && fieldType.omitNorms() == Defaults.FIELD_TYPE.omitNorms()) {\n- return builder;\n+ if (!includeDefaults) {\n+ // simulate the generation to make sure we don't add unnecessary content if all is default\n+ // if all are defaults, no need to write it at all - generating is twice 
is ok though\n+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);\n+ XContentBuilder b = new XContentBuilder(builder.contentType().xContent(), bytesStreamOutput);\n+ long pos = bytesStreamOutput.position();\n+ innerToXContent(b, false);\n+ b.flush();\n+ if (pos == bytesStreamOutput.position()) {\n+ return builder;\n+ }\n }\n builder.startObject(CONTENT_TYPE);\n+ innerToXContent(builder, includeDefaults);\n+ builder.endObject();\n+ return builder;\n+ }\n+\n+ private void innerToXContent(XContentBuilder builder, boolean includeDefaults) throws IOException {\n if (includeDefaults || enabled != Defaults.ENABLED) {\n builder.field(\"enabled\", enabled);\n }\n@@ -276,7 +287,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.field(\"store\", fieldType.stored());\n }\n if (includeDefaults || fieldType.storeTermVectors() != Defaults.FIELD_TYPE.storeTermVectors()) {\n- builder.field(\"store_term_vector\", fieldType.storeTermVectors());\n+ builder.field(\"store_term_vectors\", fieldType.storeTermVectors());\n }\n if (includeDefaults || fieldType.storeTermVectorOffsets() != Defaults.FIELD_TYPE.storeTermVectorOffsets()) {\n builder.field(\"store_term_vector_offsets\", fieldType.storeTermVectorOffsets());\n@@ -332,11 +343,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n } else if (includeDefaults) {\n builder.field(\"fielddata\", (Map) fieldDataType.getSettings().getAsMap());\n }\n-\n- builder.endObject();\n- return builder;\n }\n \n+\n @Override\n public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {\n // do nothing here, no merging, but also no exception", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java", "status": "modified" }, { "diff": "@@ -20,12 +20,18 @@\n package org.elasticsearch.index.mapper.all;\n \n import org.apache.lucene.index.Term;\n+import org.apache.lucene.search.Query;\n import org.apache.lucene.search.TermQuery;\n import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.collect.Tuple;\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.lucene.all.AllEntries;\n import org.elasticsearch.common.lucene.all.AllField;\n import org.elasticsearch.common.lucene.all.AllTermQuery;\n import org.elasticsearch.common.lucene.all.AllTokenStream;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentType;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperTestUtils;\n@@ -34,9 +40,14 @@\n import org.hamcrest.Matchers;\n import org.junit.Test;\n \n+import java.util.ArrayList;\n+import java.util.Collections;\n+import java.util.List;\n+\n import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;\n import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;\n-import static org.hamcrest.Matchers.equalTo;\n+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.hamcrest.Matchers.*;\n \n /**\n *\n@@ -152,4 +163,121 @@ public void testSimpleAllMappersWithReparseWithStore() throws Exception {\n assertThat(text, equalTo(allEntries.buildText()));\n assertThat(field.fieldType().omitNorms(), equalTo(false));\n }\n+\n+ @Test\n+ public void testRandom() throws Exception {\n+ boolean 
omitNorms = false;\n+ boolean stored = false;\n+ boolean enabled = true;\n+ boolean autoBoost = false;\n+ boolean tv_stored = false;\n+ boolean tv_payloads = false;\n+ boolean tv_offsets = false;\n+ boolean tv_positions = false;\n+ String similarity = null;\n+ boolean fieldData = false;\n+ XContentBuilder mappingBuilder = jsonBuilder();\n+ mappingBuilder.startObject().startObject(\"test\");\n+ List<Tuple<String, Boolean>> booleanOptionList = new ArrayList<Tuple<String, Boolean>>();\n+ boolean allDefault = true;\n+ if (frequently()) {\n+ allDefault = false;\n+ mappingBuilder.startObject(\"_all\");\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"omit_norms\", omitNorms = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store\", stored = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vectors\", tv_stored = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"enabled\", enabled = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"auto_boost\", autoBoost = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_offsets\", tv_offsets = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_positions\", tv_positions = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_payloads\", tv_payloads = randomBoolean()));\n+ }\n+ Collections.shuffle(booleanOptionList, getRandom());\n+ for (Tuple<String, Boolean> option : booleanOptionList) {\n+ mappingBuilder.field(option.v1(), option.v2().booleanValue());\n+ }\n+ tv_stored |= tv_positions || tv_payloads || tv_offsets;\n+ if (randomBoolean()) {\n+ mappingBuilder.field(\"similarity\", similarity = randomBoolean() ? 
\"BM25\" : \"TF/IDF\");\n+ }\n+ if (randomBoolean()) {\n+ fieldData = true;\n+ mappingBuilder.startObject(\"fielddata\");\n+ mappingBuilder.field(\"foo\", \"bar\");\n+ mappingBuilder.endObject();\n+ }\n+ mappingBuilder.endObject();\n+ }\n+\n+ String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();\n+ logger.info(mapping);\n+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);\n+ String builtMapping = docMapper.mappingSource().string();\n+ // reparse it\n+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);\n+\n+ byte[] json = jsonBuilder().startObject().startObject(\"test\")\n+ .field(\"foo\", \"bar\")\n+ .field(\"_id\", 1)\n+ .field(\"foobar\", \"foobar\")\n+ .endObject().endObject().bytes().array();\n+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();\n+ AllField field = (AllField) doc.getField(\"_all\");\n+ if (enabled) {\n+ assertThat(field.fieldType().omitNorms(), equalTo(omitNorms));\n+ assertThat(field.fieldType().stored(), equalTo(stored));\n+ assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));\n+ assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));\n+ assertThat(field.fieldType().storeTermVectorPositions(), equalTo(tv_positions));\n+ assertThat(field.fieldType().storeTermVectors(), equalTo(tv_stored));\n+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();\n+ assertThat(allEntries.fields().size(), equalTo(2));\n+ assertThat(allEntries.fields().contains(\"foobar\"), equalTo(true));\n+ assertThat(allEntries.fields().contains(\"foo\"), equalTo(true));\n+ if (!stored) {\n+ assertThat(field.stringValue(), nullValue());\n+ }\n+ String text = stored ? field.stringValue() : \"bar foobar\";\n+ assertThat(text.trim(), equalTo(allEntries.buildText().trim()));\n+ } else {\n+ assertThat(field, nullValue());\n+ }\n+\n+ Term term = new Term(\"foo\", \"bar\");\n+ Query query = builtDocMapper.allFieldMapper().queryStringTermQuery(term);\n+ if (autoBoost) {\n+ assertThat(query, equalTo((Query)new AllTermQuery(term)));\n+ } else {\n+ assertThat(query, equalTo((Query)new TermQuery(term)));\n+ }\n+ if (similarity == null || similarity.equals(\"TF/IDF\")) {\n+ assertThat(builtDocMapper.allFieldMapper().similarity(), nullValue());\n+ } else {\n+ assertThat(similarity, equalTo(builtDocMapper.allFieldMapper().similarity().name()));\n+ }\n+ assertThat(builtMapping.contains(\"fielddata\"), is(fieldData));\n+ if (allDefault) {\n+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);\n+ XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), bytesStreamOutput);\n+ XContentBuilder xContentBuilder = builtDocMapper.allFieldMapper().toXContent(b, ToXContent.EMPTY_PARAMS);\n+ xContentBuilder.flush();\n+ assertThat(bytesStreamOutput.size(), equalTo(0));\n+ }\n+\n+ }\n }", "filename": "src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java", "status": "modified" } ] }
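The change in `AbstractFieldMapper.Builder` boils down to never letting a more specific `false` clear the broader flag; a reduced sketch of that logic, where `FieldTypeFlags` is a stand-in for the Lucene `FieldType` the real builder mutates:

```java
public class TermVectorFlags {
    static final class FieldTypeFlags {
        boolean storeTermVectors;
        boolean storeTermVectorPositions;
    }

    static void storeTermVectors(FieldTypeFlags ft, boolean termVectors) {
        if (termVectors) {
            ft.storeTermVectors = true;
        } // don't set it to false, it is default and might be flipped by a more specific option
    }

    static void storeTermVectorPositions(FieldTypeFlags ft, boolean positions) {
        if (positions) {
            ft.storeTermVectors = true;
        }
        ft.storeTermVectorPositions = positions;
    }

    public static void main(String[] args) {
        FieldTypeFlags ft = new FieldTypeFlags();
        // mapping order: store_term_vectors: true, then store_term_vector_positions: false
        storeTermVectors(ft, true);
        storeTermVectorPositions(ft, false);
        System.out.println(ft.storeTermVectors);          // true - no longer flipped off
        System.out.println(ft.storeTermVectorPositions);  // false
    }
}
```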
{ "body": "In the all field mapper the setting that is used is `store_term_vector`, but it should be `store_term_vectors`.\n", "comments": [], "number": 4581, "title": "All field uses wrong setting for `term vectors`" }
{ "body": "See #4579 #4581 #4582\n", "number": 4583, "review_comments": [], "title": "Fix toXContent in the All Mapper" }
{ "commits": [ { "message": "Term Vector settings should be treated like flags without propergation\n\ntoday if a specific feature is disabled for term vectors with something\nlike 'store_term_vector_positions = false' term vectors might be disabeled\nalltogether even if 'store_term_vectors=true' in the mapping. This depends on the\norder of the values in the mapping since the more specific one might override\nthe less specific on.\n\nCloses #4582" }, { "message": "Simulate the entire toXContent instead of special caseing\n\nToday we try to detect if we need to generate the mapping or not in\nthe all mapper. This is error prone since it misses conditions if not\nexplicitly added. We should rather similate the generation instead.\n\nThis commit also adds a random test to check if the settings\nof the all field mapper are correctly applied.\n\nCloses #4579\nCloses #4581" } ], "files": [ { "diff": "@@ -117,24 +117,32 @@ public T docValues(boolean docValues) {\n }\n \n public T storeTermVectors(boolean termVectors) {\n- this.fieldType.setStoreTermVectors(termVectors);\n+ if (termVectors) {\n+ this.fieldType.setStoreTermVectors(termVectors);\n+ } // don't set it to false, it is default and might be flipped by a more specific option\n return builder;\n }\n \n public T storeTermVectorOffsets(boolean termVectorOffsets) {\n- this.fieldType.setStoreTermVectors(termVectorOffsets);\n+ if (termVectorOffsets) {\n+ this.fieldType.setStoreTermVectors(termVectorOffsets);\n+ }\n this.fieldType.setStoreTermVectorOffsets(termVectorOffsets);\n return builder;\n }\n \n public T storeTermVectorPositions(boolean termVectorPositions) {\n- this.fieldType.setStoreTermVectors(termVectorPositions);\n+ if (termVectorPositions) {\n+ this.fieldType.setStoreTermVectors(termVectorPositions);\n+ }\n this.fieldType.setStoreTermVectorPositions(termVectorPositions);\n return builder;\n }\n \n public T storeTermVectorPayloads(boolean termVectorPayloads) {\n- this.fieldType.setStoreTermVectors(termVectorPayloads);\n+ if (termVectorPayloads) {\n+ this.fieldType.setStoreTermVectors(termVectorPayloads);\n+ }\n this.fieldType.setStoreTermVectorPayloads(termVectorPayloads);\n return builder;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@\n import org.apache.lucene.search.TermQuery;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.common.lucene.all.AllField;\n import org.elasticsearch.common.lucene.all.AllTermQuery;\n@@ -256,16 +257,26 @@ protected String contentType() {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- // if all are defaults, no need to write it at all\n boolean includeDefaults = params.paramAsBoolean(\"include_defaults\", false);\n-\n- if (!includeDefaults && enabled == Defaults.ENABLED && fieldType.stored() == Defaults.FIELD_TYPE.stored() &&\n- fieldType.storeTermVectors() == Defaults.FIELD_TYPE.storeTermVectors() &&\n- indexAnalyzer == null && searchAnalyzer == null && customFieldDataSettings == null\n- && fieldType.omitNorms() == Defaults.FIELD_TYPE.omitNorms()) {\n- return builder;\n+ if (!includeDefaults) {\n+ // simulate the generation to make sure we don't add unnecessary content if all is default\n+ // if all are defaults, no need to write it at all - generating is twice 
is ok though\n+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);\n+ XContentBuilder b = new XContentBuilder(builder.contentType().xContent(), bytesStreamOutput);\n+ long pos = bytesStreamOutput.position();\n+ innerToXContent(b, false);\n+ b.flush();\n+ if (pos == bytesStreamOutput.position()) {\n+ return builder;\n+ }\n }\n builder.startObject(CONTENT_TYPE);\n+ innerToXContent(builder, includeDefaults);\n+ builder.endObject();\n+ return builder;\n+ }\n+\n+ private void innerToXContent(XContentBuilder builder, boolean includeDefaults) throws IOException {\n if (includeDefaults || enabled != Defaults.ENABLED) {\n builder.field(\"enabled\", enabled);\n }\n@@ -276,7 +287,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.field(\"store\", fieldType.stored());\n }\n if (includeDefaults || fieldType.storeTermVectors() != Defaults.FIELD_TYPE.storeTermVectors()) {\n- builder.field(\"store_term_vector\", fieldType.storeTermVectors());\n+ builder.field(\"store_term_vectors\", fieldType.storeTermVectors());\n }\n if (includeDefaults || fieldType.storeTermVectorOffsets() != Defaults.FIELD_TYPE.storeTermVectorOffsets()) {\n builder.field(\"store_term_vector_offsets\", fieldType.storeTermVectorOffsets());\n@@ -332,11 +343,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n } else if (includeDefaults) {\n builder.field(\"fielddata\", (Map) fieldDataType.getSettings().getAsMap());\n }\n-\n- builder.endObject();\n- return builder;\n }\n \n+\n @Override\n public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {\n // do nothing here, no merging, but also no exception", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java", "status": "modified" }, { "diff": "@@ -20,12 +20,18 @@\n package org.elasticsearch.index.mapper.all;\n \n import org.apache.lucene.index.Term;\n+import org.apache.lucene.search.Query;\n import org.apache.lucene.search.TermQuery;\n import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.collect.Tuple;\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.lucene.all.AllEntries;\n import org.elasticsearch.common.lucene.all.AllField;\n import org.elasticsearch.common.lucene.all.AllTermQuery;\n import org.elasticsearch.common.lucene.all.AllTokenStream;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentType;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperTestUtils;\n@@ -34,9 +40,14 @@\n import org.hamcrest.Matchers;\n import org.junit.Test;\n \n+import java.util.ArrayList;\n+import java.util.Collections;\n+import java.util.List;\n+\n import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;\n import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;\n-import static org.hamcrest.Matchers.equalTo;\n+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.hamcrest.Matchers.*;\n \n /**\n *\n@@ -152,4 +163,121 @@ public void testSimpleAllMappersWithReparseWithStore() throws Exception {\n assertThat(text, equalTo(allEntries.buildText()));\n assertThat(field.fieldType().omitNorms(), equalTo(false));\n }\n+\n+ @Test\n+ public void testRandom() throws Exception {\n+ boolean 
omitNorms = false;\n+ boolean stored = false;\n+ boolean enabled = true;\n+ boolean autoBoost = false;\n+ boolean tv_stored = false;\n+ boolean tv_payloads = false;\n+ boolean tv_offsets = false;\n+ boolean tv_positions = false;\n+ String similarity = null;\n+ boolean fieldData = false;\n+ XContentBuilder mappingBuilder = jsonBuilder();\n+ mappingBuilder.startObject().startObject(\"test\");\n+ List<Tuple<String, Boolean>> booleanOptionList = new ArrayList<Tuple<String, Boolean>>();\n+ boolean allDefault = true;\n+ if (frequently()) {\n+ allDefault = false;\n+ mappingBuilder.startObject(\"_all\");\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"omit_norms\", omitNorms = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store\", stored = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vectors\", tv_stored = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"enabled\", enabled = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"auto_boost\", autoBoost = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_offsets\", tv_offsets = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_positions\", tv_positions = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_payloads\", tv_payloads = randomBoolean()));\n+ }\n+ Collections.shuffle(booleanOptionList, getRandom());\n+ for (Tuple<String, Boolean> option : booleanOptionList) {\n+ mappingBuilder.field(option.v1(), option.v2().booleanValue());\n+ }\n+ tv_stored |= tv_positions || tv_payloads || tv_offsets;\n+ if (randomBoolean()) {\n+ mappingBuilder.field(\"similarity\", similarity = randomBoolean() ? 
\"BM25\" : \"TF/IDF\");\n+ }\n+ if (randomBoolean()) {\n+ fieldData = true;\n+ mappingBuilder.startObject(\"fielddata\");\n+ mappingBuilder.field(\"foo\", \"bar\");\n+ mappingBuilder.endObject();\n+ }\n+ mappingBuilder.endObject();\n+ }\n+\n+ String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();\n+ logger.info(mapping);\n+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);\n+ String builtMapping = docMapper.mappingSource().string();\n+ // reparse it\n+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);\n+\n+ byte[] json = jsonBuilder().startObject().startObject(\"test\")\n+ .field(\"foo\", \"bar\")\n+ .field(\"_id\", 1)\n+ .field(\"foobar\", \"foobar\")\n+ .endObject().endObject().bytes().array();\n+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();\n+ AllField field = (AllField) doc.getField(\"_all\");\n+ if (enabled) {\n+ assertThat(field.fieldType().omitNorms(), equalTo(omitNorms));\n+ assertThat(field.fieldType().stored(), equalTo(stored));\n+ assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));\n+ assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));\n+ assertThat(field.fieldType().storeTermVectorPositions(), equalTo(tv_positions));\n+ assertThat(field.fieldType().storeTermVectors(), equalTo(tv_stored));\n+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();\n+ assertThat(allEntries.fields().size(), equalTo(2));\n+ assertThat(allEntries.fields().contains(\"foobar\"), equalTo(true));\n+ assertThat(allEntries.fields().contains(\"foo\"), equalTo(true));\n+ if (!stored) {\n+ assertThat(field.stringValue(), nullValue());\n+ }\n+ String text = stored ? field.stringValue() : \"bar foobar\";\n+ assertThat(text.trim(), equalTo(allEntries.buildText().trim()));\n+ } else {\n+ assertThat(field, nullValue());\n+ }\n+\n+ Term term = new Term(\"foo\", \"bar\");\n+ Query query = builtDocMapper.allFieldMapper().queryStringTermQuery(term);\n+ if (autoBoost) {\n+ assertThat(query, equalTo((Query)new AllTermQuery(term)));\n+ } else {\n+ assertThat(query, equalTo((Query)new TermQuery(term)));\n+ }\n+ if (similarity == null || similarity.equals(\"TF/IDF\")) {\n+ assertThat(builtDocMapper.allFieldMapper().similarity(), nullValue());\n+ } else {\n+ assertThat(similarity, equalTo(builtDocMapper.allFieldMapper().similarity().name()));\n+ }\n+ assertThat(builtMapping.contains(\"fielddata\"), is(fieldData));\n+ if (allDefault) {\n+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);\n+ XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), bytesStreamOutput);\n+ XContentBuilder xContentBuilder = builtDocMapper.allFieldMapper().toXContent(b, ToXContent.EMPTY_PARAMS);\n+ xContentBuilder.flush();\n+ assertThat(bytesStreamOutput.size(), equalTo(0));\n+ }\n+\n+ }\n }", "filename": "src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java", "status": "modified" } ] }
{ "body": "The all field tries to optimize for default cases and doesn't generate XContent if everything is default. The settings are not tested well enough and there is already differences between master and 0.90. Master is already missing to check `autoboost` and `simiarity` and 0.90 has misses settings if `customFieldDataSettings` is set as the only setting as well.\n", "comments": [], "number": 4579, "title": "All field might loose configuration on serialization." }
{ "body": "See #4579 #4581 #4582\n", "number": 4583, "review_comments": [], "title": "Fix toXContent in the All Mapper" }
{ "commits": [ { "message": "Term Vector settings should be treated like flags without propergation\n\ntoday if a specific feature is disabled for term vectors with something\nlike 'store_term_vector_positions = false' term vectors might be disabeled\nalltogether even if 'store_term_vectors=true' in the mapping. This depends on the\norder of the values in the mapping since the more specific one might override\nthe less specific on.\n\nCloses #4582" }, { "message": "Simulate the entire toXContent instead of special caseing\n\nToday we try to detect if we need to generate the mapping or not in\nthe all mapper. This is error prone since it misses conditions if not\nexplicitly added. We should rather similate the generation instead.\n\nThis commit also adds a random test to check if the settings\nof the all field mapper are correctly applied.\n\nCloses #4579\nCloses #4581" } ], "files": [ { "diff": "@@ -117,24 +117,32 @@ public T docValues(boolean docValues) {\n }\n \n public T storeTermVectors(boolean termVectors) {\n- this.fieldType.setStoreTermVectors(termVectors);\n+ if (termVectors) {\n+ this.fieldType.setStoreTermVectors(termVectors);\n+ } // don't set it to false, it is default and might be flipped by a more specific option\n return builder;\n }\n \n public T storeTermVectorOffsets(boolean termVectorOffsets) {\n- this.fieldType.setStoreTermVectors(termVectorOffsets);\n+ if (termVectorOffsets) {\n+ this.fieldType.setStoreTermVectors(termVectorOffsets);\n+ }\n this.fieldType.setStoreTermVectorOffsets(termVectorOffsets);\n return builder;\n }\n \n public T storeTermVectorPositions(boolean termVectorPositions) {\n- this.fieldType.setStoreTermVectors(termVectorPositions);\n+ if (termVectorPositions) {\n+ this.fieldType.setStoreTermVectors(termVectorPositions);\n+ }\n this.fieldType.setStoreTermVectorPositions(termVectorPositions);\n return builder;\n }\n \n public T storeTermVectorPayloads(boolean termVectorPayloads) {\n- this.fieldType.setStoreTermVectors(termVectorPayloads);\n+ if (termVectorPayloads) {\n+ this.fieldType.setStoreTermVectors(termVectorPayloads);\n+ }\n this.fieldType.setStoreTermVectorPayloads(termVectorPayloads);\n return builder;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java", "status": "modified" }, { "diff": "@@ -28,6 +28,7 @@\n import org.apache.lucene.search.TermQuery;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.common.lucene.all.AllField;\n import org.elasticsearch.common.lucene.all.AllTermQuery;\n@@ -256,16 +257,26 @@ protected String contentType() {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n- // if all are defaults, no need to write it at all\n boolean includeDefaults = params.paramAsBoolean(\"include_defaults\", false);\n-\n- if (!includeDefaults && enabled == Defaults.ENABLED && fieldType.stored() == Defaults.FIELD_TYPE.stored() &&\n- fieldType.storeTermVectors() == Defaults.FIELD_TYPE.storeTermVectors() &&\n- indexAnalyzer == null && searchAnalyzer == null && customFieldDataSettings == null\n- && fieldType.omitNorms() == Defaults.FIELD_TYPE.omitNorms()) {\n- return builder;\n+ if (!includeDefaults) {\n+ // simulate the generation to make sure we don't add unnecessary content if all is default\n+ // if all are defaults, no need to write it at all - generating is twice 
is ok though\n+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);\n+ XContentBuilder b = new XContentBuilder(builder.contentType().xContent(), bytesStreamOutput);\n+ long pos = bytesStreamOutput.position();\n+ innerToXContent(b, false);\n+ b.flush();\n+ if (pos == bytesStreamOutput.position()) {\n+ return builder;\n+ }\n }\n builder.startObject(CONTENT_TYPE);\n+ innerToXContent(builder, includeDefaults);\n+ builder.endObject();\n+ return builder;\n+ }\n+\n+ private void innerToXContent(XContentBuilder builder, boolean includeDefaults) throws IOException {\n if (includeDefaults || enabled != Defaults.ENABLED) {\n builder.field(\"enabled\", enabled);\n }\n@@ -276,7 +287,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n builder.field(\"store\", fieldType.stored());\n }\n if (includeDefaults || fieldType.storeTermVectors() != Defaults.FIELD_TYPE.storeTermVectors()) {\n- builder.field(\"store_term_vector\", fieldType.storeTermVectors());\n+ builder.field(\"store_term_vectors\", fieldType.storeTermVectors());\n }\n if (includeDefaults || fieldType.storeTermVectorOffsets() != Defaults.FIELD_TYPE.storeTermVectorOffsets()) {\n builder.field(\"store_term_vector_offsets\", fieldType.storeTermVectorOffsets());\n@@ -332,11 +343,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n } else if (includeDefaults) {\n builder.field(\"fielddata\", (Map) fieldDataType.getSettings().getAsMap());\n }\n-\n- builder.endObject();\n- return builder;\n }\n \n+\n @Override\n public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {\n // do nothing here, no merging, but also no exception", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java", "status": "modified" }, { "diff": "@@ -20,12 +20,18 @@\n package org.elasticsearch.index.mapper.all;\n \n import org.apache.lucene.index.Term;\n+import org.apache.lucene.search.Query;\n import org.apache.lucene.search.TermQuery;\n import org.elasticsearch.common.bytes.BytesArray;\n+import org.elasticsearch.common.collect.Tuple;\n+import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.lucene.all.AllEntries;\n import org.elasticsearch.common.lucene.all.AllField;\n import org.elasticsearch.common.lucene.all.AllTermQuery;\n import org.elasticsearch.common.lucene.all.AllTokenStream;\n+import org.elasticsearch.common.xcontent.ToXContent;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n+import org.elasticsearch.common.xcontent.XContentType;\n import org.elasticsearch.index.mapper.DocumentMapper;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperTestUtils;\n@@ -34,9 +40,14 @@\n import org.hamcrest.Matchers;\n import org.junit.Test;\n \n+import java.util.ArrayList;\n+import java.util.Collections;\n+import java.util.List;\n+\n import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;\n import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;\n-import static org.hamcrest.Matchers.equalTo;\n+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.hamcrest.Matchers.*;\n \n /**\n *\n@@ -152,4 +163,121 @@ public void testSimpleAllMappersWithReparseWithStore() throws Exception {\n assertThat(text, equalTo(allEntries.buildText()));\n assertThat(field.fieldType().omitNorms(), equalTo(false));\n }\n+\n+ @Test\n+ public void testRandom() throws Exception {\n+ boolean 
omitNorms = false;\n+ boolean stored = false;\n+ boolean enabled = true;\n+ boolean autoBoost = false;\n+ boolean tv_stored = false;\n+ boolean tv_payloads = false;\n+ boolean tv_offsets = false;\n+ boolean tv_positions = false;\n+ String similarity = null;\n+ boolean fieldData = false;\n+ XContentBuilder mappingBuilder = jsonBuilder();\n+ mappingBuilder.startObject().startObject(\"test\");\n+ List<Tuple<String, Boolean>> booleanOptionList = new ArrayList<Tuple<String, Boolean>>();\n+ boolean allDefault = true;\n+ if (frequently()) {\n+ allDefault = false;\n+ mappingBuilder.startObject(\"_all\");\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"omit_norms\", omitNorms = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store\", stored = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vectors\", tv_stored = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"enabled\", enabled = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"auto_boost\", autoBoost = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_offsets\", tv_offsets = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_positions\", tv_positions = randomBoolean()));\n+ }\n+ if (randomBoolean()) {\n+ booleanOptionList.add(new Tuple<String, Boolean>(\"store_term_vector_payloads\", tv_payloads = randomBoolean()));\n+ }\n+ Collections.shuffle(booleanOptionList, getRandom());\n+ for (Tuple<String, Boolean> option : booleanOptionList) {\n+ mappingBuilder.field(option.v1(), option.v2().booleanValue());\n+ }\n+ tv_stored |= tv_positions || tv_payloads || tv_offsets;\n+ if (randomBoolean()) {\n+ mappingBuilder.field(\"similarity\", similarity = randomBoolean() ? 
\"BM25\" : \"TF/IDF\");\n+ }\n+ if (randomBoolean()) {\n+ fieldData = true;\n+ mappingBuilder.startObject(\"fielddata\");\n+ mappingBuilder.field(\"foo\", \"bar\");\n+ mappingBuilder.endObject();\n+ }\n+ mappingBuilder.endObject();\n+ }\n+\n+ String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();\n+ logger.info(mapping);\n+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);\n+ String builtMapping = docMapper.mappingSource().string();\n+ // reparse it\n+ DocumentMapper builtDocMapper = MapperTestUtils.newParser().parse(builtMapping);\n+\n+ byte[] json = jsonBuilder().startObject().startObject(\"test\")\n+ .field(\"foo\", \"bar\")\n+ .field(\"_id\", 1)\n+ .field(\"foobar\", \"foobar\")\n+ .endObject().endObject().bytes().array();\n+ Document doc = builtDocMapper.parse(new BytesArray(json)).rootDoc();\n+ AllField field = (AllField) doc.getField(\"_all\");\n+ if (enabled) {\n+ assertThat(field.fieldType().omitNorms(), equalTo(omitNorms));\n+ assertThat(field.fieldType().stored(), equalTo(stored));\n+ assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));\n+ assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));\n+ assertThat(field.fieldType().storeTermVectorPositions(), equalTo(tv_positions));\n+ assertThat(field.fieldType().storeTermVectors(), equalTo(tv_stored));\n+ AllEntries allEntries = ((AllTokenStream) field.tokenStream(docMapper.mappers().indexAnalyzer())).allEntries();\n+ assertThat(allEntries.fields().size(), equalTo(2));\n+ assertThat(allEntries.fields().contains(\"foobar\"), equalTo(true));\n+ assertThat(allEntries.fields().contains(\"foo\"), equalTo(true));\n+ if (!stored) {\n+ assertThat(field.stringValue(), nullValue());\n+ }\n+ String text = stored ? field.stringValue() : \"bar foobar\";\n+ assertThat(text.trim(), equalTo(allEntries.buildText().trim()));\n+ } else {\n+ assertThat(field, nullValue());\n+ }\n+\n+ Term term = new Term(\"foo\", \"bar\");\n+ Query query = builtDocMapper.allFieldMapper().queryStringTermQuery(term);\n+ if (autoBoost) {\n+ assertThat(query, equalTo((Query)new AllTermQuery(term)));\n+ } else {\n+ assertThat(query, equalTo((Query)new TermQuery(term)));\n+ }\n+ if (similarity == null || similarity.equals(\"TF/IDF\")) {\n+ assertThat(builtDocMapper.allFieldMapper().similarity(), nullValue());\n+ } else {\n+ assertThat(similarity, equalTo(builtDocMapper.allFieldMapper().similarity().name()));\n+ }\n+ assertThat(builtMapping.contains(\"fielddata\"), is(fieldData));\n+ if (allDefault) {\n+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);\n+ XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), bytesStreamOutput);\n+ XContentBuilder xContentBuilder = builtDocMapper.allFieldMapper().toXContent(b, ToXContent.EMPTY_PARAMS);\n+ xContentBuilder.flush();\n+ assertThat(bytesStreamOutput.size(), equalTo(0));\n+ }\n+\n+ }\n }", "filename": "src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java", "status": "modified" } ] }
{ "body": "Right now doc values are enabled on a field if its fielddata format is `doc_values` at creation time of the field mapper.\n\nSo if you create a field with the `doc_values` fielddata format, then use the update mapping API to change the format to `paged_bytes` instead and restart the node, the field mapper won't know that it needs to index doc values and the `doc_values` format won't be usable anymore.\n\nI'd like to have a `doc_values` setting on the same level as `index` and `store` in the mappings that would remain true even when the field data format becomes `paged_bytes` so that doc values keep being indexed and the fielddata format can be later set to `doc_values` again.\n", "comments": [ { "body": "+1 I think this should be antiviral as well.\n", "created_at": "2013-12-27T18:48:30Z" } ], "number": 4560, "title": "Explicit doc values setting" }
{ "body": "Once doc values are enabled on a field, they can't be disabled.\n\nClose #4560\n", "number": 4561, "review_comments": [], "title": "Explicit doc_values setting." }
{ "commits": [ { "message": "Explicit doc_values setting.\n\nOnce doc values are enabled on a field, they can't be disabled.\n\nClose #4560" } ], "files": [ { "diff": "@@ -88,6 +88,9 @@ searchable at all (as an individual field; it may still be included in\n `_all`). Setting to `no` disables `include_in_all`. Defaults to\n `analyzed`.\n \n+|`doc_values` |Set to `true` to store field values in a column-stride fashion.\n+Automatically set to `true` when the fielddata format is `doc_values`.\n+\n |`term_vector` |Possible values are `no`, `yes`, `with_offsets`,\n `with_positions`, `with_positions_offsets`. Defaults to `no`.\n \n@@ -195,6 +198,9 @@ and it can be retrieved from it).\n in `_source`, have `include_in_all` enabled, or `store` should be set to\n `true` for this to be useful.\n \n+|`doc_values` |Set to `true` to store field values in a column-stride fashion.\n+Automatically set to `true` when the fielddata format is `doc_values`.\n+\n |`precision_step` |The precision step (number of terms generated for\n each number value). Defaults to `4`.\n \n@@ -303,6 +309,9 @@ and it can be retrieved from it).\n in `_source`, have `include_in_all` enabled, or `store` should be set to\n `true` for this to be useful.\n \n+|`doc_values` |Set to `true` to store field values in a column-stride fashion.\n+Automatically set to `true` when the fielddata format is `doc_values`.\n+\n |`precision_step` |The precision step (number of terms generated for\n each number value). Defaults to `4`.\n ", "filename": "docs/reference/mapping/types/core-types.asciidoc", "status": "modified" }, { "diff": "@@ -64,6 +64,7 @@ public abstract class AbstractFieldMapper<T> implements FieldMapper<T> {\n \n public static class Defaults {\n public static final FieldType FIELD_TYPE = new FieldType();\n+ public static final boolean DOC_VALUES = false;\n \n static {\n FIELD_TYPE.setIndexed(true);\n@@ -81,6 +82,7 @@ public static class Defaults {\n public abstract static class Builder<T extends Builder, Y extends AbstractFieldMapper> extends Mapper.Builder<T, Y> {\n \n protected final FieldType fieldType;\n+ protected Boolean docValues;\n protected float boost = Defaults.BOOST;\n protected boolean omitNormsSet = false;\n protected String indexName;\n@@ -109,6 +111,11 @@ public T store(boolean store) {\n return builder;\n }\n \n+ public T docValues(boolean docValues) {\n+ this.docValues = docValues;\n+ return builder;\n+ }\n+\n public T storeTermVectors(boolean termVectors) {\n this.fieldType.setStoreTermVectors(termVectors);\n return builder;\n@@ -227,7 +234,7 @@ protected List<Field> initialValue() {\n protected Settings customFieldDataSettings;\n protected FieldDataType fieldDataType;\n \n- protected AbstractFieldMapper(Names names, float boost, FieldType fieldType, NamedAnalyzer indexAnalyzer,\n+ protected AbstractFieldMapper(Names names, float boost, FieldType fieldType, Boolean docValues, NamedAnalyzer indexAnalyzer,\n NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsFormat,\n DocValuesFormatProvider docValuesFormat, SimilarityProvider similarity,\n @Nullable Settings fieldDataSettings, Settings indexSettings) {\n@@ -266,8 +273,10 @@ protected AbstractFieldMapper(Names names, float boost, FieldType fieldType, Nam\n ImmutableSettings.builder().put(defaultFieldDataType().getSettings()).put(fieldDataSettings)\n );\n }\n- if (fieldDataType == null) {\n- docValues = false;\n+ if (docValues != null) {\n+ this.docValues = docValues;\n+ } else if (fieldDataType == null) {\n+ this.docValues = false;\n } else {\n 
this.docValues = FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(fieldDataType.getFormat(indexSettings));\n }\n@@ -498,6 +507,11 @@ public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappi\n if (this.fieldType().stored() != fieldMergeWith.fieldType().stored()) {\n mergeContext.addConflict(\"mapper [\" + names.fullName() + \"] has different store values\");\n }\n+ if (!this.hasDocValues() && fieldMergeWith.hasDocValues()) {\n+ // don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitely set\n+ // when the doc_values field data format is configured\n+ mergeContext.addConflict(\"mapper [\" + names.fullName() + \"] has different \" + TypeParsers.DOC_VALUES + \" values\");\n+ }\n if (this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) {\n mergeContext.addConflict(\"mapper [\" + names.fullName() + \"] has different tokenize values\");\n }\n@@ -593,6 +607,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults,\n if (includeDefaults || fieldType.stored() != defaultFieldType.stored()) {\n builder.field(\"store\", fieldType.stored());\n }\n+ if (includeDefaults || hasDocValues() != Defaults.DOC_VALUES) {\n+ builder.field(TypeParsers.DOC_VALUES, docValues);\n+ }\n if (includeDefaults || fieldType.storeTermVectors() != defaultFieldType.storeTermVectors()) {\n builder.field(\"term_vector\", termVectorOptionsToString(fieldType));\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java", "status": "modified" }, { "diff": "@@ -121,7 +121,7 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n protected BinaryFieldMapper(Names names, FieldType fieldType, Boolean compress, long compressThreshold,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider) {\n- super(names, 1.0f, fieldType, null, null, postingsProvider, docValuesProvider, null, null, null);\n+ super(names, 1.0f, fieldType, null, null, null, postingsProvider, docValuesProvider, null, null, null);\n this.compress = compress;\n this.compressThreshold = compressThreshold;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java", "status": "modified" }, { "diff": "@@ -124,7 +124,7 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n protected BooleanFieldMapper(Names names, float boost, FieldType fieldType, Boolean nullValue, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity, @Nullable Settings fieldDataSettings,\n Settings indexSettings) {\n- super(names, boost, fieldType, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n+ super(names, boost, fieldType, null, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n this.nullValue = nullValue;\n }\n ", "filename": "src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java", "status": "modified" }, { "diff": "@@ -91,7 +91,7 @@ public Builder nullValue(byte nullValue) {\n public ByteFieldMapper build(BuilderContext context) {\n fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);\n ByteFieldMapper fieldMapper = new ByteFieldMapper(buildNames(context),\n- precisionStep, boost, fieldType, nullValue, ignoreMalformed(context),\n+ precisionStep, boost, 
fieldType, docValues, nullValue, ignoreMalformed(context),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -118,11 +118,11 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private String nullValueAsString;\n \n- protected ByteFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType,\n+ protected ByteFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n Byte nullValue, Explicit<Boolean> ignoreMalformed, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity,\n @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, precisionStep, boost, fieldType,\n+ super(names, precisionStep, boost, fieldType, docValues,\n ignoreMalformed, new NamedAnalyzer(\"_byte/\" + precisionStep, new NumericIntegerAnalyzer(precisionStep)),\n new NamedAnalyzer(\"_byte/max\", new NumericIntegerAnalyzer(Integer.MAX_VALUE)), postingsProvider,\n docValuesProvider, similarity, fieldDataSettings, indexSettings);", "filename": "src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java", "status": "modified" }, { "diff": "@@ -195,7 +195,7 @@ private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name)\n \n public CompletionFieldMapper(Names names, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsProvider, SimilarityProvider similarity, boolean payloads,\n boolean preserveSeparators, boolean preservePositionIncrements, int maxInputLength) {\n- super(names, 1.0f, Defaults.FIELD_TYPE, indexAnalyzer, searchAnalyzer, postingsProvider, null, similarity, null, null);\n+ super(names, 1.0f, Defaults.FIELD_TYPE, null, indexAnalyzer, searchAnalyzer, postingsProvider, null, similarity, null, null);\n analyzingSuggestLookupProvider = new AnalyzingCompletionLookupProvider(preserveSeparators, false, preservePositionIncrements, payloads);\n this.completionPostingsFormatProvider = new CompletionPostingsFormatProvider(\"completion\", postingsProvider, analyzingSuggestLookupProvider);\n this.preserveSeparators = preserveSeparators;", "filename": "src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java", "status": "modified" }, { "diff": "@@ -128,7 +128,7 @@ public DateFieldMapper build(BuilderContext context) {\n dateTimeFormatter = new FormatDateTimeFormatter(dateTimeFormatter.format(), dateTimeFormatter.parser(), dateTimeFormatter.printer(), locale);\n }\n DateFieldMapper fieldMapper = new DateFieldMapper(buildNames(context), dateTimeFormatter,\n- precisionStep, boost, fieldType, nullValue, timeUnit, roundCeil, ignoreMalformed(context),\n+ precisionStep, boost, fieldType, docValues, nullValue, timeUnit, roundCeil, ignoreMalformed(context),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -200,11 +200,11 @@ public static Locale parseLocale(String locale) {\n \n protected final TimeUnit timeUnit;\n \n- protected DateFieldMapper(Names names, FormatDateTimeFormatter dateTimeFormatter, int precisionStep, float boost, FieldType fieldType,\n+ protected DateFieldMapper(Names names, FormatDateTimeFormatter dateTimeFormatter, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n String nullValue, TimeUnit timeUnit, boolean 
roundCeil, Explicit<Boolean> ignoreMalformed,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity,\n @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, precisionStep, boost, fieldType, ignoreMalformed, new NamedAnalyzer(\"_date/\" + precisionStep,\n+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, new NamedAnalyzer(\"_date/\" + precisionStep,\n new NumericDateAnalyzer(precisionStep, dateTimeFormatter.parser())),\n new NamedAnalyzer(\"_date/max\", new NumericDateAnalyzer(Integer.MAX_VALUE, dateTimeFormatter.parser())),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);", "filename": "src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java", "status": "modified" }, { "diff": "@@ -94,7 +94,7 @@ public Builder nullValue(double nullValue) {\n public DoubleFieldMapper build(BuilderContext context) {\n fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);\n DoubleFieldMapper fieldMapper = new DoubleFieldMapper(buildNames(context),\n- precisionStep, boost, fieldType, nullValue, ignoreMalformed(context), postingsProvider, docValuesProvider,\n+ precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed(context), postingsProvider, docValuesProvider,\n similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -122,11 +122,11 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private String nullValueAsString;\n \n- protected DoubleFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType,\n+ protected DoubleFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n Double nullValue, Explicit<Boolean> ignoreMalformed,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,\n SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, precisionStep, boost, fieldType, ignoreMalformed,\n+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed,\n NumericDoubleAnalyzer.buildNamedAnalyzer(precisionStep), NumericDoubleAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n this.nullValue = nullValue;", "filename": "src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java", "status": "modified" }, { "diff": "@@ -95,7 +95,7 @@ public Builder nullValue(float nullValue) {\n public FloatFieldMapper build(BuilderContext context) {\n fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);\n FloatFieldMapper fieldMapper = new FloatFieldMapper(buildNames(context),\n- precisionStep, boost, fieldType, nullValue, ignoreMalformed(context), postingsProvider, docValuesProvider,\n+ precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed(context), postingsProvider, docValuesProvider,\n similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -122,11 +122,11 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private String nullValueAsString;\n \n- protected FloatFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType,\n+ protected FloatFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n Float 
nullValue, Explicit<Boolean> ignoreMalformed,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,\n SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, precisionStep, boost, fieldType, ignoreMalformed,\n+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed,\n NumericFloatAnalyzer.buildNamedAnalyzer(precisionStep), NumericFloatAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n this.nullValue = nullValue;", "filename": "src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java", "status": "modified" }, { "diff": "@@ -90,7 +90,7 @@ public Builder nullValue(int nullValue) {\n @Override\n public IntegerFieldMapper build(BuilderContext context) {\n fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);\n- IntegerFieldMapper fieldMapper = new IntegerFieldMapper(buildNames(context), precisionStep, boost, fieldType,\n+ IntegerFieldMapper fieldMapper = new IntegerFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues,\n nullValue, ignoreMalformed(context), postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -117,11 +117,11 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private String nullValueAsString;\n \n- protected IntegerFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType,\n+ protected IntegerFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n Integer nullValue, Explicit<Boolean> ignoreMalformed,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,\n SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, precisionStep, boost, fieldType, ignoreMalformed,\n+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed,\n NumericIntegerAnalyzer.buildNamedAnalyzer(precisionStep), NumericIntegerAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n this.nullValue = nullValue;", "filename": "src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java", "status": "modified" }, { "diff": "@@ -90,7 +90,7 @@ public Builder nullValue(long nullValue) {\n @Override\n public LongFieldMapper build(BuilderContext context) {\n fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);\n- LongFieldMapper fieldMapper = new LongFieldMapper(buildNames(context), precisionStep, boost, fieldType, nullValue,\n+ LongFieldMapper fieldMapper = new LongFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues, nullValue,\n ignoreMalformed(context), postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -117,11 +117,11 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private String nullValueAsString;\n \n- protected LongFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType,\n+ protected LongFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n Long nullValue, Explicit<Boolean> ignoreMalformed,\n PostingsFormatProvider postingsProvider, 
DocValuesFormatProvider docValuesProvider,\n SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, precisionStep, boost, fieldType, ignoreMalformed,\n+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed,\n NumericLongAnalyzer.buildNamedAnalyzer(precisionStep), NumericLongAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n this.nullValue = nullValue;", "filename": "src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java", "status": "modified" }, { "diff": "@@ -143,13 +143,13 @@ protected NumericTokenStream initialValue() {\n }\n };\n \n- protected NumberFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType,\n+ protected NumberFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n Explicit<Boolean> ignoreMalformed, NamedAnalyzer indexAnalyzer,\n NamedAnalyzer searchAnalyzer, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity,\n @Nullable Settings fieldDataSettings, Settings indexSettings) {\n // LUCENE 4 UPGRADE: Since we can't do anything before the super call, we have to push the boost check down to subclasses\n- super(names, boost, fieldType, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n+ super(names, boost, fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider, similarity, fieldDataSettings, indexSettings);\n if (precisionStep <= 0 || precisionStep >= maxPrecisionStep()) {\n this.precisionStep = Integer.MAX_VALUE;\n } else {", "filename": "src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java", "status": "modified" }, { "diff": "@@ -91,7 +91,7 @@ public Builder nullValue(short nullValue) {\n @Override\n public ShortFieldMapper build(BuilderContext context) {\n fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);\n- ShortFieldMapper fieldMapper = new ShortFieldMapper(buildNames(context), precisionStep, boost, fieldType, nullValue,\n+ ShortFieldMapper fieldMapper = new ShortFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues, nullValue,\n ignoreMalformed(context), postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -118,11 +118,11 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private String nullValueAsString;\n \n- protected ShortFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType,\n+ protected ShortFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues,\n Short nullValue, Explicit<Boolean> ignoreMalformed,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,\n SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, precisionStep, boost, fieldType, ignoreMalformed, new NamedAnalyzer(\"_short/\" + precisionStep,\n+ super(names, precisionStep, boost, fieldType, docValues, ignoreMalformed, new NamedAnalyzer(\"_short/\" + precisionStep,\n new NumericIntegerAnalyzer(precisionStep)), new NamedAnalyzer(\"_short/max\", new NumericIntegerAnalyzer(Integer.MAX_VALUE)),\n postingsProvider, docValuesProvider, similarity, fieldDataSettings, 
indexSettings);\n this.nullValue = nullValue;", "filename": "src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java", "status": "modified" }, { "diff": "@@ -134,7 +134,7 @@ public StringFieldMapper build(BuilderContext context) {\n }\n }\n StringFieldMapper fieldMapper = new StringFieldMapper(buildNames(context),\n- boost, fieldType, nullValue, indexAnalyzer, searchAnalyzer, searchQuotedAnalyzer,\n+ boost, fieldType, docValues, nullValue, indexAnalyzer, searchAnalyzer, searchQuotedAnalyzer,\n positionOffsetGap, ignoreAbove, postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings());\n fieldMapper.includeInAll(includeInAll);\n return fieldMapper;\n@@ -188,12 +188,12 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private int ignoreAbove;\n \n- protected StringFieldMapper(Names names, float boost, FieldType fieldType,\n+ protected StringFieldMapper(Names names, float boost, FieldType fieldType, Boolean docValues,\n String nullValue, NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer,\n NamedAnalyzer searchQuotedAnalyzer, int positionOffsetGap, int ignoreAbove,\n PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat,\n SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(names, boost, fieldType, indexAnalyzer, searchAnalyzer, postingsFormat, docValuesFormat, similarity, fieldDataSettings, indexSettings);\n+ super(names, boost, fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsFormat, docValuesFormat, similarity, fieldDataSettings, indexSettings);\n if (fieldType.tokenized() && fieldType.indexed() && hasDocValues()) {\n throw new MapperParsingException(\"Field [\" + names.fullName() + \"] cannot be analyzed and have doc values\");\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java", "status": "modified" }, { "diff": "@@ -78,7 +78,7 @@ public NamedAnalyzer analyzer() {\n @Override\n public TokenCountFieldMapper build(BuilderContext context) {\n fieldType.setOmitNorms(fieldType.omitNorms() && boost == 1.0f);\n- TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(buildNames(context), precisionStep, boost, fieldType, nullValue,\n+ TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(buildNames(context), precisionStep, boost, fieldType, docValues, nullValue,\n ignoreMalformed(context), postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings(),\n analyzer);\n fieldMapper.includeInAll(includeInAll);\n@@ -114,10 +114,10 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n \n private NamedAnalyzer analyzer;\n \n- protected TokenCountFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Integer nullValue,\n+ protected TokenCountFieldMapper(Names names, int precisionStep, float boost, FieldType fieldType, Boolean docValues, Integer nullValue,\n Explicit<Boolean> ignoreMalformed, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,\n SimilarityProvider similarity, Settings fieldDataSettings, Settings indexSettings, NamedAnalyzer analyzer) {\n- super(names, precisionStep, boost, fieldType, nullValue, ignoreMalformed, postingsProvider, docValuesProvider, similarity,\n+ super(names, precisionStep, boost, fieldType, docValues, nullValue, ignoreMalformed, postingsProvider, docValuesProvider, similarity,\n fieldDataSettings, indexSettings);\n 
this.analyzer = analyzer;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java", "status": "modified" }, { "diff": "@@ -42,6 +42,7 @@\n */\n public class TypeParsers {\n \n+ public static final String DOC_VALUES = \"doc_values\";\n public static final String INDEX_OPTIONS_DOCS = \"docs\";\n public static final String INDEX_OPTIONS_FREQS = \"freqs\";\n public static final String INDEX_OPTIONS_POSITIONS = \"positions\";\n@@ -76,6 +77,8 @@ public static void parseField(AbstractFieldMapper.Builder builder, String name,\n parseIndex(name, propNode.toString(), builder);\n } else if (propName.equals(\"tokenized\")) {\n builder.tokenized(nodeBooleanValue(propNode));\n+ } else if (propName.equals(DOC_VALUES)) {\n+ builder.docValues(nodeBooleanValue(propNode));\n } else if (propName.equals(\"term_vector\")) {\n parseTermVector(name, propNode.toString(), builder);\n } else if (propName.equals(\"boost\")) {", "filename": "src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java", "status": "modified" }, { "diff": "@@ -196,7 +196,7 @@ public GeoPointFieldMapper build(BuilderContext context) {\n // store them as a single token.\n fieldType.setTokenized(false);\n \n- return new GeoPointFieldMapper(buildNames(context), fieldType, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings(), origPathType, enableLatLon, enableGeoHash, enableGeohashPrefix, precisionStep, geoHashPrecision, latMapper, lonMapper, geohashMapper, validateLon, validateLat, normalizeLon, normalizeLat);\n+ return new GeoPointFieldMapper(buildNames(context), fieldType, docValues, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider, similarity, fieldDataSettings, context.indexSettings(), origPathType, enableLatLon, enableGeoHash, enableGeohashPrefix, precisionStep, geoHashPrecision, latMapper, lonMapper, geohashMapper, validateLon, validateLat, normalizeLon, normalizeLat);\n }\n }\n \n@@ -392,15 +392,15 @@ public GeoPoint decode(long latBits, long lonBits, GeoPoint out) {\n private final boolean normalizeLon;\n private final boolean normalizeLat;\n \n- public GeoPointFieldMapper(FieldMapper.Names names, FieldType fieldType,\n+ public GeoPointFieldMapper(FieldMapper.Names names, FieldType fieldType, Boolean docValues,\n NamedAnalyzer indexAnalyzer, NamedAnalyzer searchAnalyzer,\n PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat,\n SimilarityProvider similarity, @Nullable Settings fieldDataSettings, Settings indexSettings,\n ContentPath.Type pathType, boolean enableLatLon, boolean enableGeoHash, boolean enableGeohashPrefix, Integer precisionStep, int geoHashPrecision,\n DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geohashMapper,\n boolean validateLon, boolean validateLat,\n boolean normalizeLon, boolean normalizeLat) {\n- super(names, 1f, fieldType, null, indexAnalyzer, postingsFormat, docValuesFormat, similarity, fieldDataSettings, indexSettings);\n+ super(names, 1f, fieldType, docValues, null, indexAnalyzer, postingsFormat, docValuesFormat, similarity, fieldDataSettings, indexSettings);\n this.pathType = pathType;\n this.enableLatLon = enableLatLon;\n this.enableGeoHash = enableGeoHash || enableGeohashPrefix; // implicitly enable geohashes if geohash_prefix is set", "filename": "src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java", "status": "modified" }, { "diff": "@@ -196,7 +196,7 @@ public Mapper.Builder parse(String name, 
Map<String, Object> node, ParserContext\n \n public GeoShapeFieldMapper(FieldMapper.Names names, SpatialPrefixTree tree, String defaultStrategyName, double distanceErrorPct,\n FieldType fieldType, PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider) {\n- super(names, 1, fieldType, null, null, postingsProvider, docValuesProvider, null, null, null);\n+ super(names, 1, fieldType, null, null, null, postingsProvider, docValuesProvider, null, null, null);\n this.recursiveStrategy = new RecursivePrefixTreeStrategy(tree, names.indexName());\n this.recursiveStrategy.setDistErrPct(distanceErrorPct);\n this.termStrategy = new TermQueryPrefixTreeStrategy(tree, names.indexName());", "filename": "src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java", "status": "modified" }, { "diff": "@@ -145,7 +145,7 @@ protected AllFieldMapper(String name, FieldType fieldType, NamedAnalyzer indexAn\n boolean enabled, boolean autoBoost, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, SimilarityProvider similarity, @Nullable Settings fieldDataSettings,\n Settings indexSettings) {\n- super(new Names(name, name, name, name), 1.0f, fieldType, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider,\n+ super(new Names(name, name, name, name), 1.0f, fieldType, null, indexAnalyzer, searchAnalyzer, postingsProvider, docValuesProvider,\n similarity, fieldDataSettings, indexSettings);\n this.enabled = enabled;\n this.autoBoost = autoBoost;", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java", "status": "modified" }, { "diff": "@@ -90,7 +90,7 @@ public Builder nullValue(float nullValue) {\n @Override\n public BoostFieldMapper build(BuilderContext context) {\n return new BoostFieldMapper(name, buildIndexName(context),\n- precisionStep, boost, fieldType, nullValue, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n+ precisionStep, boost, fieldType, docValues, nullValue, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n }\n }\n \n@@ -118,13 +118,13 @@ public BoostFieldMapper() {\n }\n \n protected BoostFieldMapper(String name, String indexName) {\n- this(name, indexName, Defaults.PRECISION_STEP, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE),\n+ this(name, indexName, Defaults.PRECISION_STEP, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null,\n Defaults.NULL_VALUE, null, null, null, ImmutableSettings.EMPTY);\n }\n \n- protected BoostFieldMapper(String name, String indexName, int precisionStep, float boost, FieldType fieldType, Float nullValue,\n+ protected BoostFieldMapper(String name, String indexName, int precisionStep, float boost, FieldType fieldType, Boolean docValues, Float nullValue,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(name, indexName, indexName, name), precisionStep, boost, fieldType, Defaults.IGNORE_MALFORMED,\n+ super(new Names(name, indexName, indexName, name), precisionStep, boost, fieldType, docValues, Defaults.IGNORE_MALFORMED,\n NumericFloatAnalyzer.buildNamedAnalyzer(precisionStep), NumericFloatAnalyzer.buildNamedAnalyzer(Integer.MAX_VALUE),\n postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n this.nullValue = nullValue;", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/BoostFieldMapper.java", "status": "modified" }, { "diff": 
"@@ -97,7 +97,7 @@ public Builder path(String path) {\n \n @Override\n public IdFieldMapper build(BuilderContext context) {\n- return new IdFieldMapper(name, indexName, boost, fieldType, path, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n+ return new IdFieldMapper(name, indexName, boost, fieldType, docValues, path, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n }\n }\n \n@@ -120,21 +120,21 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n private final String path;\n \n public IdFieldMapper() {\n- this(Defaults.NAME, Defaults.INDEX_NAME, new FieldType(Defaults.FIELD_TYPE));\n+ this(new FieldType(Defaults.FIELD_TYPE));\n }\n \n public IdFieldMapper(FieldType fieldType) {\n- this(Defaults.NAME, Defaults.INDEX_NAME, fieldType);\n+ this(Defaults.NAME, Defaults.INDEX_NAME, fieldType, null);\n }\n \n- protected IdFieldMapper(String name, String indexName, FieldType fieldType) {\n- this(name, indexName, Defaults.BOOST, fieldType, Defaults.PATH, null, null, null, ImmutableSettings.EMPTY);\n+ protected IdFieldMapper(String name, String indexName, FieldType fieldType, Boolean docValues) {\n+ this(name, indexName, Defaults.BOOST, fieldType, docValues, Defaults.PATH, null, null, null, ImmutableSettings.EMPTY);\n }\n \n- protected IdFieldMapper(String name, String indexName, float boost, FieldType fieldType, String path,\n+ protected IdFieldMapper(String name, String indexName, float boost, FieldType fieldType, Boolean docValues, String path,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,\n @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(name, indexName, indexName, name), boost, fieldType, Lucene.KEYWORD_ANALYZER,\n+ super(new Names(name, indexName, indexName, name), boost, fieldType, docValues, Lucene.KEYWORD_ANALYZER,\n Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n this.path = path;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java", "status": "modified" }, { "diff": "@@ -85,7 +85,7 @@ public Builder enabled(EnabledAttributeMapper enabledState) {\n \n @Override\n public IndexFieldMapper build(BuilderContext context) {\n- return new IndexFieldMapper(name, indexName, boost, fieldType, enabledState, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n+ return new IndexFieldMapper(name, indexName, boost, fieldType, docValues, enabledState, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n }\n }\n \n@@ -114,12 +114,12 @@ public IndexFieldMapper() {\n }\n \n protected IndexFieldMapper(String name, String indexName) {\n- this(name, indexName, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), Defaults.ENABLED_STATE, null, null, null, ImmutableSettings.EMPTY);\n+ this(name, indexName, Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null, Defaults.ENABLED_STATE, null, null, null, ImmutableSettings.EMPTY);\n }\n \n- public IndexFieldMapper(String name, String indexName, float boost, FieldType fieldType, EnabledAttributeMapper enabledState,\n+ public IndexFieldMapper(String name, String indexName, float boost, FieldType fieldType, Boolean docValues, EnabledAttributeMapper enabledState,\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(name, 
indexName, indexName, name), boost, fieldType, Lucene.KEYWORD_ANALYZER,\n+ super(new Names(name, indexName, indexName, name), boost, fieldType, docValues, Lucene.KEYWORD_ANALYZER,\n Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n this.enabledState = enabledState;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java", "status": "modified" }, { "diff": "@@ -126,17 +126,14 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n private final BytesRef typeAsBytes;\n \n protected ParentFieldMapper(String name, String indexName, String type, PostingsFormatProvider postingsFormat, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE),\n+ super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null,\n Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsFormat, null, null, fieldDataSettings, indexSettings);\n this.type = type;\n- this.typeAsBytes = new BytesRef(type);\n+ this.typeAsBytes = type == null ? null : new BytesRef(type);\n }\n \n public ParentFieldMapper() {\n- super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE),\n- Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, null, null, null, null, null);\n- type = null;\n- typeAsBytes = null;\n+ this(Defaults.NAME, Defaults.NAME, null, null, null, null);\n }\n \n public String type() {", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java", "status": "modified" }, { "diff": "@@ -125,7 +125,7 @@ public RoutingFieldMapper() {\n \n protected RoutingFieldMapper(FieldType fieldType, boolean required, String path, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), 1.0f, fieldType, Lucene.KEYWORD_ANALYZER,\n+ super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), 1.0f, fieldType, null, Lucene.KEYWORD_ANALYZER,\n Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n this.required = required;\n this.path = path;", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java", "status": "modified" }, { "diff": "@@ -101,7 +101,7 @@ public SizeFieldMapper() {\n \n public SizeFieldMapper(EnabledAttributeMapper enabled, FieldType fieldType, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(Defaults.NAME), Defaults.PRECISION_STEP, Defaults.BOOST, fieldType, Defaults.NULL_VALUE,\n+ super(new Names(Defaults.NAME), Defaults.PRECISION_STEP, Defaults.BOOST, fieldType, null, Defaults.NULL_VALUE,\n Defaults.IGNORE_MALFORMED, postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n this.enabledState = enabled;\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java", "status": "modified" }, { "diff": "@@ -195,7 +195,7 @@ public SourceFieldMapper() {\n \n protected SourceFieldMapper(String name, boolean enabled, String format, Boolean compress, long compressThreshold,\n String[] includes, String[] excludes) {\n- 
super(new Names(name, name, name, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE),\n+ super(new Names(name, name, name, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null,\n Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, null, null, null, null, null); // Only stored.\n this.enabled = enabled;\n this.compress = compress;", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java", "status": "modified" }, { "diff": "@@ -126,7 +126,7 @@ protected TTLFieldMapper(FieldType fieldType, EnabledAttributeMapper enabled, lo\n PostingsFormatProvider postingsProvider, DocValuesFormatProvider docValuesProvider,\n @Nullable Settings fieldDataSettings, Settings indexSettings) {\n super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), Defaults.PRECISION_STEP,\n- Defaults.BOOST, fieldType, Defaults.NULL_VALUE, ignoreMalformed,\n+ Defaults.BOOST, fieldType, null, Defaults.NULL_VALUE, ignoreMalformed,\n postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n this.enabledState = enabled;\n this.defaultTTL = defaultTTL;", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java", "status": "modified" }, { "diff": "@@ -104,7 +104,7 @@ public TimestampFieldMapper build(BuilderContext context) {\n Settings settings = context.indexSettings();\n roundCeil = settings.getAsBoolean(\"index.mapping.date.round_ceil\", settings.getAsBoolean(\"index.mapping.date.parse_upper_inclusive\", Defaults.ROUND_CEIL));\n }\n- return new TimestampFieldMapper(fieldType, enabledState, path, dateTimeFormatter, roundCeil,\n+ return new TimestampFieldMapper(fieldType, docValues, enabledState, path, dateTimeFormatter, roundCeil,\n ignoreMalformed(context), postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n }\n }\n@@ -136,17 +136,17 @@ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext\n private final String path;\n \n public TimestampFieldMapper() {\n- this(new FieldType(Defaults.FIELD_TYPE), Defaults.ENABLED, Defaults.PATH, Defaults.DATE_TIME_FORMATTER,\n+ this(new FieldType(Defaults.FIELD_TYPE), null, Defaults.ENABLED, Defaults.PATH, Defaults.DATE_TIME_FORMATTER,\n Defaults.ROUND_CEIL, Defaults.IGNORE_MALFORMED, null, null, null, ImmutableSettings.EMPTY);\n }\n \n- protected TimestampFieldMapper(FieldType fieldType, EnabledAttributeMapper enabledState, String path,\n+ protected TimestampFieldMapper(FieldType fieldType, Boolean docValues, EnabledAttributeMapper enabledState, String path,\n FormatDateTimeFormatter dateTimeFormatter, boolean roundCeil,\n Explicit<Boolean> ignoreMalformed, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings,\n Settings indexSettings) {\n super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), dateTimeFormatter,\n- Defaults.PRECISION_STEP, Defaults.BOOST, fieldType,\n+ Defaults.PRECISION_STEP, Defaults.BOOST, fieldType, docValues,\n Defaults.NULL_VALUE, TimeUnit.MILLISECONDS /*always milliseconds*/,\n roundCeil, ignoreMalformed, postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n this.enabledState = enabledState;", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java", "status": "modified" }, { "diff": "@@ -108,7 +108,7 @@ protected TypeFieldMapper(String name, String indexName) {\n \n public TypeFieldMapper(String name, String indexName, float boost, 
FieldType fieldType, PostingsFormatProvider postingsProvider,\n DocValuesFormatProvider docValuesProvider, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(name, indexName, indexName, name), boost, fieldType, Lucene.KEYWORD_ANALYZER,\n+ super(new Names(name, indexName, indexName, name), boost, fieldType, null, Lucene.KEYWORD_ANALYZER,\n Lucene.KEYWORD_ANALYZER, postingsProvider, docValuesProvider, null, fieldDataSettings, indexSettings);\n }\n ", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java", "status": "modified" }, { "diff": "@@ -86,7 +86,7 @@ public Builder() {\n \n @Override\n public UidFieldMapper build(BuilderContext context) {\n- return new UidFieldMapper(name, indexName, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n+ return new UidFieldMapper(name, indexName, docValues, postingsProvider, docValuesProvider, fieldDataSettings, context.indexSettings());\n }\n }\n \n@@ -104,11 +104,11 @@ public UidFieldMapper() {\n }\n \n protected UidFieldMapper(String name) {\n- this(name, name, null, null, null, ImmutableSettings.EMPTY);\n+ this(name, name, null, null, null, null, ImmutableSettings.EMPTY);\n }\n \n- protected UidFieldMapper(String name, String indexName, PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n- super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE),\n+ protected UidFieldMapper(String name, String indexName, Boolean docValues, PostingsFormatProvider postingsFormat, DocValuesFormatProvider docValuesFormat, @Nullable Settings fieldDataSettings, Settings indexSettings) {\n+ super(new Names(name, indexName, indexName, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), docValues,\n Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, postingsFormat, docValuesFormat, null, fieldDataSettings, indexSettings);\n }\n ", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java", "status": "modified" } ] }
{ "body": "I have templates that define specific mappings. In testing 0.90.8 today, I noticed that the template mappings are not being used when I load data in to an index that matches the template. This has worked in all previous versions of ElasticSearch that I have used (0.19x -> 0.90.7).\n", "comments": [ { "body": "Hey @seallison can you gimme more information what you mean by template mapping and maybe provide a small recreation gist for the problem you see?\n", "created_at": "2013-12-18T22:30:06Z" }, { "body": "Hey,\n\nthere were a couple of bugs fixed for 0.90.8 regarding file based mapping template loading - actually template loading by file was not working until 0.90.8 in a couple of previous 0.90 releases (wondering that this has worked for you so far). I am very happy to help and debug the issue with you, if you help with a bit more information.\n", "created_at": "2013-12-18T22:54:56Z" }, { "body": "This is an example of what I'm talking about: https://gist.github.com/seallison/8031640\n\nLet me know if you need any additional information. Thanks!\n", "created_at": "2013-12-18T23:32:16Z" }, { "body": "@spinscale all of my mappings have been in templates that I construct like in my gist. I've deployed this using 0.90.0, 0.90.3, and 0.90.5 without issue.\n", "created_at": "2013-12-18T23:38:12Z" }, { "body": "I can see why it happens. A quick fix is to remove the most outer data structure named `template_testlocations` and directly start with `template` like this:\n\n```\n{\n \"template\": \"locations*\",\n \"mappings\": {\n \"locations\": {\n \"_source\": {\n \"compress\": true\n },\n \"_all\": {\n \"enabled\": false\n },\n \"properties\": {\n \"contactPersonId\" : {\n \"index_analyzer\": \"keyword\",\n \"type\" : \"string\"\n },\n \"state\": {\n \"index_analyzer\": \"keyword\",\n \"type\": \"string\"\n }\n }\n }\n }\n }\n```\n", "created_at": "2013-12-19T09:49:21Z" }, { "body": "We gonna revert the revert to make sure people that were on `0.90.8` and move to the next version will have a better user experience if we support both formats hence I reopened https://github.com/elasticsearch/elasticsearch/pull/4517\n", "created_at": "2013-12-22T20:58:17Z" } ], "number": 4511, "title": "template mappings are not loading in 0.90.8" }
{ "body": "Closes #4511\n", "number": 4530, "review_comments": [], "title": "Reverting back to 0.90.7 config/templates loading behaviour" }
{ "commits": [ { "message": "Reverting back to 0.90.7 config/templates loading behaviour\n\nCloses #4511" } ], "files": [ { "diff": "@@ -482,7 +482,7 @@ private List<IndexTemplateMetaData> findTemplates(Request request, ClusterState\n try {\n byte[] templatesData = Streams.copyToByteArray(templatesFile);\n parser = XContentHelper.createParser(templatesData, 0, templatesData.length);\n- IndexTemplateMetaData template = IndexTemplateMetaData.Builder.fromXContent(parser);\n+ IndexTemplateMetaData template = IndexTemplateMetaData.Builder.fromXContentStandalone(parser);\n if (Regex.simpleMatch(template.template(), request.index)) {\n templates.add(template);\n }", "filename": "src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java", "status": "modified" }, { "diff": "@@ -58,7 +58,7 @@ protected Settings nodeSettings(int nodeOrdinal) {\n \n File dst = new File(templatesDir, \"template.json\");\n // random template, one uses the 'setting.index.number_of_shards', the other 'settings.number_of_shards'\n- String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(1) + \".json\");\n+ String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(2) + \".json\");\n Files.write(template, dst, Charsets.UTF_8);\n } catch (Exception e) {\n throw new RuntimeException(e);", "filename": "src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java", "status": "modified" }, { "diff": "@@ -1,7 +1,9 @@\n {\n- \"template\" : \"foo*\",\n- \"settings\" : {\n- \"index.number_of_shards\": 10,\n- \"index.number_of_replicas\": 0\n- }\n+ \"template_1\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index.number_of_shards\": 10,\n+ \"index.number_of_replicas\": 0\n+ }\n+ }\n }\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template0.json", "status": "modified" }, { "diff": "@@ -1,7 +1,9 @@\n {\n- \"template\" : \"foo*\",\n- \"settings\" : {\n- \"number_of_shards\": 10,\n- \"number_of_replicas\": 0\n+ \"template_1\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n }\n }\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template1.json", "status": "modified" }, { "diff": "@@ -1,9 +1,11 @@\n {\n- \"template\" : \"foo*\",\n- \"settings\" : {\n- \"index\" : {\n- \"number_of_shards\": 10,\n- \"number_of_replicas\": 0\n+ \"template_1\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n }\n }\n }\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template2.json", "status": "modified" } ] }
{ "body": "I have templates that define specific mappings. In testing 0.90.8 today, I noticed that the template mappings are not being used when I load data in to an index that matches the template. This has worked in all previous versions of ElasticSearch that I have used (0.19x -> 0.90.7).\n", "comments": [ { "body": "Hey @seallison can you gimme more information what you mean by template mapping and maybe provide a small recreation gist for the problem you see?\n", "created_at": "2013-12-18T22:30:06Z" }, { "body": "Hey,\n\nthere were a couple of bugs fixed for 0.90.8 regarding file based mapping template loading - actually template loading by file was not working until 0.90.8 in a couple of previous 0.90 releases (wondering that this has worked for you so far). I am very happy to help and debug the issue with you, if you help with a bit more information.\n", "created_at": "2013-12-18T22:54:56Z" }, { "body": "This is an example of what I'm talking about: https://gist.github.com/seallison/8031640\n\nLet me know if you need any additional information. Thanks!\n", "created_at": "2013-12-18T23:32:16Z" }, { "body": "@spinscale all of my mappings have been in templates that I construct like in my gist. I've deployed this using 0.90.0, 0.90.3, and 0.90.5 without issue.\n", "created_at": "2013-12-18T23:38:12Z" }, { "body": "I can see why it happens. A quick fix is to remove the most outer data structure named `template_testlocations` and directly start with `template` like this:\n\n```\n{\n \"template\": \"locations*\",\n \"mappings\": {\n \"locations\": {\n \"_source\": {\n \"compress\": true\n },\n \"_all\": {\n \"enabled\": false\n },\n \"properties\": {\n \"contactPersonId\" : {\n \"index_analyzer\": \"keyword\",\n \"type\" : \"string\"\n },\n \"state\": {\n \"index_analyzer\": \"keyword\",\n \"type\": \"string\"\n }\n }\n }\n }\n }\n```\n", "created_at": "2013-12-19T09:49:21Z" }, { "body": "We gonna revert the revert to make sure people that were on `0.90.8` and move to the next version will have a better user experience if we support both formats hence I reopened https://github.com/elasticsearch/elasticsearch/pull/4517\n", "created_at": "2013-12-22T20:58:17Z" } ], "number": 4511, "title": "template mappings are not loading in 0.90.8" }
{ "body": "The fixes introduced in #4235 and #4411 do not take into account, that a\ntemplate JSON in the config/ directory includes a template name, as opposed\nwhen calling the Put Template API.\n\nThis PR allows to put both formats (either specifying a template name or not)\ninto files. However you template name/id may not be one of the template\nelement names like \"template\", \"settings\", \"order\" or \"mapping\".\n\nCloses #4511\n", "number": 4517, "review_comments": [], "title": "Fix loading templates in config/ directory" }
{ "commits": [ { "message": "Fix loading templates in config/ directory\n\nThe fixes introduced in #4235 and #4411 do not take into account, that a\ntemplate JSON in the config/ directory includes a template name, as opposed\nwhen calling the Put Template API.\n\nThis PR allows to put both formats (either specifying a template name or not)\ninto files. However you template name/id may not be one of the template\nelement names like \"template\", \"settings\", \"order\" or \"mapping\".\n\nCloses #4511" } ], "files": [ { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.cluster.metadata;\n \n import com.carrotsearch.hppc.cursors.ObjectObjectCursor;\n+import com.google.common.collect.Sets;\n import org.elasticsearch.common.collect.ImmutableOpenMap;\n import org.elasticsearch.common.collect.MapBuilder;\n import org.elasticsearch.common.compress.CompressedString;\n@@ -35,6 +36,7 @@\n \n import java.io.IOException;\n import java.util.Map;\n+import java.util.Set;\n \n /**\n *\n@@ -147,6 +149,11 @@ public int hashCode() {\n \n public static class Builder {\n \n+ private static final Set<String> VALID_FIELDS = Sets.newHashSet(\"template\", \"order\", \"mappings\", \"settings\");\n+ static {\n+ VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet());\n+ }\n+\n private String name;\n \n private int order;\n@@ -296,8 +303,8 @@ public static IndexTemplateMetaData fromXContentStandalone(XContentParser parser\n public static IndexTemplateMetaData fromXContent(XContentParser parser) throws IOException {\n Builder builder = new Builder(parser.currentName());\n \n- String currentFieldName = null;\n- XContentParser.Token token = parser.nextToken();\n+ String currentFieldName = skipTemplateName(parser);\n+ XContentParser.Token token;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n@@ -359,6 +366,24 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser) throws I\n return builder.build();\n }\n \n+ private static String skipTemplateName(XContentParser parser) throws IOException {\n+ XContentParser.Token token = parser.nextToken();\n+ if (token != null && token == XContentParser.Token.START_OBJECT) {\n+ token = parser.nextToken();\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ String currentFieldName = parser.currentName();\n+ if (VALID_FIELDS.contains(currentFieldName)) {\n+ return currentFieldName;\n+ } else {\n+ // we just hit the template name, which should be ignored and we move on\n+ parser.nextToken();\n+ }\n+ }\n+ }\n+\n+ return null;\n+ }\n+\n public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {\n Builder builder = new Builder(in.readString());\n builder.order(in.readInt());", "filename": "src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;\n import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.is;\n import static org.hamcrest.Matchers.nullValue;\n \n /**\n@@ -89,6 +90,11 @@ public void testSimpleJsonFromAndTo() throws IOException {\n .putAlias(newAliasMetaDataBuilder(\"alias1\").filter(ALIAS_FILTER1))\n .putAlias(newAliasMetaDataBuilder(\"alias2\"))\n .putAlias(newAliasMetaDataBuilder(\"alias4\").filter(ALIAS_FILTER2)))\n+ 
.put(IndexTemplateMetaData.builder(\"foo\")\n+ .template(\"bar\")\n+ .order(1).settings(settingsBuilder()\n+ .put(\"setting1\", \"value1\")\n+ .put(\"setting2\", \"value2\")))\n .build();\n \n String metaDataSource = MetaData.Builder.toXContent(metaData);\n@@ -172,6 +178,12 @@ public void testSimpleJsonFromAndTo() throws IOException {\n assertThat(indexMetaData.aliases().get(\"alias3\").filter(), nullValue());\n assertThat(indexMetaData.aliases().get(\"alias4\").alias(), equalTo(\"alias4\"));\n assertThat(indexMetaData.aliases().get(\"alias4\").filter().string(), equalTo(ALIAS_FILTER2));\n+\n+ // templates\n+ assertThat(parsedMetaData.templates().get(\"foo\").name(), is(\"foo\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").template(), is(\"bar\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").settings().get(\"index.setting1\"), is(\"value1\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").settings().getByPrefix(\"index.\").get(\"setting2\"), is(\"value2\"));\n }\n \n private static final String MAPPING_SOURCE1 = \"{\\\"mapping1\\\":{\\\"text1\\\":{\\\"type\\\":\\\"string\\\"}}}\";", "filename": "src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java", "status": "modified" }, { "diff": "@@ -58,7 +58,7 @@ protected Settings nodeSettings(int nodeOrdinal) {\n \n File dst = new File(templatesDir, \"template.json\");\n // random template, one uses the 'setting.index.number_of_shards', the other 'settings.number_of_shards'\n- String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(1) + \".json\");\n+ String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(5) + \".json\");\n Files.write(template, dst, Charsets.UTF_8);\n } catch (Exception e) {\n throw new RuntimeException(e);", "filename": "src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index.number_of_shards\": 10,\n+ \"index.number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template3.json", "status": "added" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template4.json", "status": "added" }, { "diff": "@@ -0,0 +1,11 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template5.json", "status": "added" } ] }
{ "body": "I've got it working using the curl query to create a new template.\nBut when i set it up in the/templates directory it doesnt seem to get picked up at all.\nI've seen a bug raised looking similar to this. What is the work around- can i explicitly set the templates path in the config file? Also should i be able to see anything in the logs telling me this the template files are being picked up or not?\n\nAlso while looking into this issue i was following the page to see if my templates are created ok : http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-templates.html to get an output of all the loaded up templates, And i've found it doesnt seem to work unless i specify the correct template exact name. So calling curl -XGET localhost:9200/_template/temp\\* or curl -XGET localhost:9200/_template/ doesnt seem to bring back any output at all.\n\nThis was using elasticsearch-0.90.3\n", "comments": [ { "body": "Hey,\n\nit seems you have hit this https://github.com/elasticsearch/elasticsearch/issues/4235\n\nThis will be fixed in the upcoming 0.90.8 release. If you want to test, you could test elasticsearch 1.0.0beta2 or the current 0.90 branch - would be happy to get feedback\n", "created_at": "2013-12-12T08:15:08Z" }, { "body": "I found another subtle bug, the #4235 works only if you use \n\n```\n\"settings\" : { \"index.number_of_shards\" : 12 } }\n```\n\nbut does not work for the shorter version\n\n```\n\"settings\" : { \"number_of_shards\" : 12 } }\n```\n\nwill fix\n", "created_at": "2013-12-12T09:34:41Z" } ], "number": 4411, "title": "problem picking up templates in the config/templates directory" }
{ "body": "The fixes introduced in #4235 and #4411 do not take into account, that a\ntemplate JSON in the config/ directory includes a template name, as opposed\nwhen calling the Put Template API.\n\nThis PR allows to put both formats (either specifying a template name or not)\ninto files. However you template name/id may not be one of the template\nelement names like \"template\", \"settings\", \"order\" or \"mapping\".\n\nCloses #4511\n", "number": 4517, "review_comments": [], "title": "Fix loading templates in config/ directory" }
{ "commits": [ { "message": "Fix loading templates in config/ directory\n\nThe fixes introduced in #4235 and #4411 do not take into account, that a\ntemplate JSON in the config/ directory includes a template name, as opposed\nwhen calling the Put Template API.\n\nThis PR allows to put both formats (either specifying a template name or not)\ninto files. However you template name/id may not be one of the template\nelement names like \"template\", \"settings\", \"order\" or \"mapping\".\n\nCloses #4511" } ], "files": [ { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.cluster.metadata;\n \n import com.carrotsearch.hppc.cursors.ObjectObjectCursor;\n+import com.google.common.collect.Sets;\n import org.elasticsearch.common.collect.ImmutableOpenMap;\n import org.elasticsearch.common.collect.MapBuilder;\n import org.elasticsearch.common.compress.CompressedString;\n@@ -35,6 +36,7 @@\n \n import java.io.IOException;\n import java.util.Map;\n+import java.util.Set;\n \n /**\n *\n@@ -147,6 +149,11 @@ public int hashCode() {\n \n public static class Builder {\n \n+ private static final Set<String> VALID_FIELDS = Sets.newHashSet(\"template\", \"order\", \"mappings\", \"settings\");\n+ static {\n+ VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet());\n+ }\n+\n private String name;\n \n private int order;\n@@ -296,8 +303,8 @@ public static IndexTemplateMetaData fromXContentStandalone(XContentParser parser\n public static IndexTemplateMetaData fromXContent(XContentParser parser) throws IOException {\n Builder builder = new Builder(parser.currentName());\n \n- String currentFieldName = null;\n- XContentParser.Token token = parser.nextToken();\n+ String currentFieldName = skipTemplateName(parser);\n+ XContentParser.Token token;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n@@ -359,6 +366,24 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser) throws I\n return builder.build();\n }\n \n+ private static String skipTemplateName(XContentParser parser) throws IOException {\n+ XContentParser.Token token = parser.nextToken();\n+ if (token != null && token == XContentParser.Token.START_OBJECT) {\n+ token = parser.nextToken();\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ String currentFieldName = parser.currentName();\n+ if (VALID_FIELDS.contains(currentFieldName)) {\n+ return currentFieldName;\n+ } else {\n+ // we just hit the template name, which should be ignored and we move on\n+ parser.nextToken();\n+ }\n+ }\n+ }\n+\n+ return null;\n+ }\n+\n public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {\n Builder builder = new Builder(in.readString());\n builder.order(in.readInt());", "filename": "src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;\n import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.is;\n import static org.hamcrest.Matchers.nullValue;\n \n /**\n@@ -89,6 +90,11 @@ public void testSimpleJsonFromAndTo() throws IOException {\n .putAlias(newAliasMetaDataBuilder(\"alias1\").filter(ALIAS_FILTER1))\n .putAlias(newAliasMetaDataBuilder(\"alias2\"))\n .putAlias(newAliasMetaDataBuilder(\"alias4\").filter(ALIAS_FILTER2)))\n+ 
.put(IndexTemplateMetaData.builder(\"foo\")\n+ .template(\"bar\")\n+ .order(1).settings(settingsBuilder()\n+ .put(\"setting1\", \"value1\")\n+ .put(\"setting2\", \"value2\")))\n .build();\n \n String metaDataSource = MetaData.Builder.toXContent(metaData);\n@@ -172,6 +178,12 @@ public void testSimpleJsonFromAndTo() throws IOException {\n assertThat(indexMetaData.aliases().get(\"alias3\").filter(), nullValue());\n assertThat(indexMetaData.aliases().get(\"alias4\").alias(), equalTo(\"alias4\"));\n assertThat(indexMetaData.aliases().get(\"alias4\").filter().string(), equalTo(ALIAS_FILTER2));\n+\n+ // templates\n+ assertThat(parsedMetaData.templates().get(\"foo\").name(), is(\"foo\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").template(), is(\"bar\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").settings().get(\"index.setting1\"), is(\"value1\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").settings().getByPrefix(\"index.\").get(\"setting2\"), is(\"value2\"));\n }\n \n private static final String MAPPING_SOURCE1 = \"{\\\"mapping1\\\":{\\\"text1\\\":{\\\"type\\\":\\\"string\\\"}}}\";", "filename": "src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java", "status": "modified" }, { "diff": "@@ -58,7 +58,7 @@ protected Settings nodeSettings(int nodeOrdinal) {\n \n File dst = new File(templatesDir, \"template.json\");\n // random template, one uses the 'setting.index.number_of_shards', the other 'settings.number_of_shards'\n- String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(1) + \".json\");\n+ String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(5) + \".json\");\n Files.write(template, dst, Charsets.UTF_8);\n } catch (Exception e) {\n throw new RuntimeException(e);", "filename": "src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index.number_of_shards\": 10,\n+ \"index.number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template3.json", "status": "added" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template4.json", "status": "added" }, { "diff": "@@ -0,0 +1,11 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template5.json", "status": "added" } ] }
{ "body": "Seems that with 0.90.7/master loading files via the `config/templates`directory does not work.\n\nThe problem is, that the parsed file jumps over the first while parsing, which results in the template not being set, if it is the first.\n\nGoing to send a PR in a second.\n", "comments": [ { "body": "This still doesn't seem to work for me.\n\nI've used example given in http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-templates.html#config but after restart, `curl -XGET http://localhost:9200/_template` is empty and doesn't seem to be applied.\n\nElasticsearch 0.9.10.\n", "created_at": "2014-01-17T13:51:18Z" } ], "number": 4235, "title": "Loading templates via templates/ directory is not working" }
{ "body": "The fixes introduced in #4235 and #4411 do not take into account, that a\ntemplate JSON in the config/ directory includes a template name, as opposed\nwhen calling the Put Template API.\n\nThis PR allows to put both formats (either specifying a template name or not)\ninto files. However you template name/id may not be one of the template\nelement names like \"template\", \"settings\", \"order\" or \"mapping\".\n\nCloses #4511\n", "number": 4517, "review_comments": [], "title": "Fix loading templates in config/ directory" }
{ "commits": [ { "message": "Fix loading templates in config/ directory\n\nThe fixes introduced in #4235 and #4411 do not take into account, that a\ntemplate JSON in the config/ directory includes a template name, as opposed\nwhen calling the Put Template API.\n\nThis PR allows to put both formats (either specifying a template name or not)\ninto files. However you template name/id may not be one of the template\nelement names like \"template\", \"settings\", \"order\" or \"mapping\".\n\nCloses #4511" } ], "files": [ { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.cluster.metadata;\n \n import com.carrotsearch.hppc.cursors.ObjectObjectCursor;\n+import com.google.common.collect.Sets;\n import org.elasticsearch.common.collect.ImmutableOpenMap;\n import org.elasticsearch.common.collect.MapBuilder;\n import org.elasticsearch.common.compress.CompressedString;\n@@ -35,6 +36,7 @@\n \n import java.io.IOException;\n import java.util.Map;\n+import java.util.Set;\n \n /**\n *\n@@ -147,6 +149,11 @@ public int hashCode() {\n \n public static class Builder {\n \n+ private static final Set<String> VALID_FIELDS = Sets.newHashSet(\"template\", \"order\", \"mappings\", \"settings\");\n+ static {\n+ VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet());\n+ }\n+\n private String name;\n \n private int order;\n@@ -296,8 +303,8 @@ public static IndexTemplateMetaData fromXContentStandalone(XContentParser parser\n public static IndexTemplateMetaData fromXContent(XContentParser parser) throws IOException {\n Builder builder = new Builder(parser.currentName());\n \n- String currentFieldName = null;\n- XContentParser.Token token = parser.nextToken();\n+ String currentFieldName = skipTemplateName(parser);\n+ XContentParser.Token token;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n@@ -359,6 +366,24 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser) throws I\n return builder.build();\n }\n \n+ private static String skipTemplateName(XContentParser parser) throws IOException {\n+ XContentParser.Token token = parser.nextToken();\n+ if (token != null && token == XContentParser.Token.START_OBJECT) {\n+ token = parser.nextToken();\n+ if (token == XContentParser.Token.FIELD_NAME) {\n+ String currentFieldName = parser.currentName();\n+ if (VALID_FIELDS.contains(currentFieldName)) {\n+ return currentFieldName;\n+ } else {\n+ // we just hit the template name, which should be ignored and we move on\n+ parser.nextToken();\n+ }\n+ }\n+ }\n+\n+ return null;\n+ }\n+\n public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {\n Builder builder = new Builder(in.readString());\n builder.order(in.readInt());", "filename": "src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java", "status": "modified" }, { "diff": "@@ -29,6 +29,7 @@\n import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;\n import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.is;\n import static org.hamcrest.Matchers.nullValue;\n \n /**\n@@ -89,6 +90,11 @@ public void testSimpleJsonFromAndTo() throws IOException {\n .putAlias(newAliasMetaDataBuilder(\"alias1\").filter(ALIAS_FILTER1))\n .putAlias(newAliasMetaDataBuilder(\"alias2\"))\n .putAlias(newAliasMetaDataBuilder(\"alias4\").filter(ALIAS_FILTER2)))\n+ 
.put(IndexTemplateMetaData.builder(\"foo\")\n+ .template(\"bar\")\n+ .order(1).settings(settingsBuilder()\n+ .put(\"setting1\", \"value1\")\n+ .put(\"setting2\", \"value2\")))\n .build();\n \n String metaDataSource = MetaData.Builder.toXContent(metaData);\n@@ -172,6 +178,12 @@ public void testSimpleJsonFromAndTo() throws IOException {\n assertThat(indexMetaData.aliases().get(\"alias3\").filter(), nullValue());\n assertThat(indexMetaData.aliases().get(\"alias4\").alias(), equalTo(\"alias4\"));\n assertThat(indexMetaData.aliases().get(\"alias4\").filter().string(), equalTo(ALIAS_FILTER2));\n+\n+ // templates\n+ assertThat(parsedMetaData.templates().get(\"foo\").name(), is(\"foo\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").template(), is(\"bar\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").settings().get(\"index.setting1\"), is(\"value1\"));\n+ assertThat(parsedMetaData.templates().get(\"foo\").settings().getByPrefix(\"index.\").get(\"setting2\"), is(\"value2\"));\n }\n \n private static final String MAPPING_SOURCE1 = \"{\\\"mapping1\\\":{\\\"text1\\\":{\\\"type\\\":\\\"string\\\"}}}\";", "filename": "src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java", "status": "modified" }, { "diff": "@@ -58,7 +58,7 @@ protected Settings nodeSettings(int nodeOrdinal) {\n \n File dst = new File(templatesDir, \"template.json\");\n // random template, one uses the 'setting.index.number_of_shards', the other 'settings.number_of_shards'\n- String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(1) + \".json\");\n+ String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(5) + \".json\");\n Files.write(template, dst, Charsets.UTF_8);\n } catch (Exception e) {\n throw new RuntimeException(e);", "filename": "src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index.number_of_shards\": 10,\n+ \"index.number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template3.json", "status": "added" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template4.json", "status": "added" }, { "diff": "@@ -0,0 +1,11 @@\n+{\n+ \"mytemplate\" : {\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template5.json", "status": "added" } ] }
{ "body": "When dates are specified without a year, for example: `Dec 15 10:00:00` they are treated as dates in 2000 during indexing and range searches except for the upper included bound `lte`, where they are treated as dates in 1970. Repro: https://gist.github.com/imotov/7978186. Might be related to #2731.\n", "comments": [], "number": 4451, "title": "Inconsistent treatment of dates without year" }
{ "body": "Fixes #4451\n\nDate fields without date (HH:mm:ss, for example) are parsed as time on Jan 1, 1970 UTC. However, before this change partial dates without year (MMM dd HH:mm:ss, for example) were parsed as as days of they year 2000. This change makes all partial dates to be treated based on year 1970. This is breaking change - before this change \"Dec 15, 10:00:00\" in most cases was parsed (and indexed) as \"2000-12-15T10:00:00Z\". After this change, it will be consistently parsed and indexed as \"1970-12-15T10:00:00Z\"\n", "number": 4496, "review_comments": [], "title": "Make partial dates without year to be 1970 based instead of 2000" }
{ "commits": [ { "message": "Make partial dates without year to be 1970 based instead of 2000\n\nFixes #4451\n\nDate fields without date (HH:mm:ss, for example) are parsed as time on Jan 1, 1970 UTC. However, before this change partial dates without year (MMM dd HH:mm:ss, for example) were parsed as as days of they year 2000. This change makes all partial dates to be treated based on year 1970. This is breaking change - before this change \"Dec 15, 10:00:00\" in most cases was parsed (and indexed) as \"2000-12-15T10:00:00Z\". After this change, it will be consistently parsed and indexed as \"1970-12-15T10:00:00Z\"" } ], "files": [ { "diff": "@@ -44,8 +44,8 @@ public FormatDateTimeFormatter(String format, DateTimeFormatter parser, Locale l\n public FormatDateTimeFormatter(String format, DateTimeFormatter parser, DateTimeFormatter printer, Locale locale) {\n this.format = format;\n this.locale = locale;\n- this.printer = locale == null ? printer : printer.withLocale(locale);\n- this.parser = locale == null ? parser : parser.withLocale(locale);\n+ this.printer = locale == null ? printer.withDefaultYear(1970) : printer.withLocale(locale).withDefaultYear(1970);\n+ this.parser = locale == null ? parser.withDefaultYear(1970) : parser.withLocale(locale).withDefaultYear(1970);\n }\n \n public String format() {", "filename": "src/main/java/org/elasticsearch/common/joda/FormatDateTimeFormatter.java", "status": "modified" }, { "diff": "@@ -224,6 +224,30 @@ public void testHourFormat() throws Exception {\n assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(10).millis()).getMillis()));\n }\n \n+\n+ @Test\n+ public void testDayWithoutYearFormat() throws Exception {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .field(\"date_detection\", false)\n+ .startObject(\"properties\").startObject(\"date_field\").field(\"type\", \"date\").field(\"format\", \"MMM dd HH:mm:ss\").endObject().endObject()\n+ .endObject().endObject().string();\n+\n+ DocumentMapper defaultMapper = mapper(mapping);\n+\n+ ParsedDocument doc = defaultMapper.parse(\"type\", \"1\", XContentFactory.jsonBuilder()\n+ .startObject()\n+ .field(\"date_field\", \"Jan 02 10:00:00\")\n+ .endObject()\n+ .bytes());\n+ assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField(\"date_field\")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(34).millis(), DateTimeZone.UTC).getMillis())));\n+\n+ Filter filter = defaultMapper.mappers().smartNameFieldMapper(\"date_field\").rangeFilter(\"Jan 02 10:00:00\", \"Jan 02 11:00:00\", true, true, null);\n+ assertThat(filter, instanceOf(NumericRangeFilter.class));\n+ NumericRangeFilter<Long> rangeFilter = (NumericRangeFilter<Long>) filter;\n+ assertThat(rangeFilter.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(35).millis() + 999).getMillis())); // +999 to include the 00-01 minute\n+ assertThat(rangeFilter.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(34).millis()).getMillis()));\n+ }\n+\n @Test\n public void testIgnoreMalformedOption() throws Exception {\n String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")", "filename": "src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java", "status": "modified" } ] }
{ "body": "ES Version: 0.90.7, Java version: 1.7 update 45 64 bit Server VM.\n\nI have a 7 node cluster with 5 master nodes and 2 client nodes. \nWhen I was shutting down all nodes to do a full cluster restart, one node did not die and looks there is a deadlock.\n\nStack Trace:\n\n2013-12-03 22:07:50\nFull thread dump Java HotSpot(TM) 64-Bit Server VM (24.45-b08 mixed mode):\n\n\"Attach Listener\" daemon prio=10 tid=0x00007f8ed4028000 nid=0x5d32 waiting on condition [0x0000000000000000]\n java.lang.Thread.State: RUNNABLE\n\n\"Thread-1\" prio=10 tid=0x00007f8e88698000 nid=0x5c6e waiting on condition [0x00007f8e7e861000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1461)\n at org.elasticsearch.threadpool.ThreadPool.awaitTermination(ThreadPool.java:249)\n at org.elasticsearch.node.internal.InternalNode.close(InternalNode.java:342)\n at org.elasticsearch.bootstrap.Bootstrap$1.run(Bootstrap.java:73)\n\n\"SIGTERM handler\" daemon prio=10 tid=0x00007f8ed4042000 nid=0x5c6b in Object.wait() [0x00007f8ee7915000]\n java.lang.Thread.State: WAITING (on object monitor)\n at java.lang.Object.wait(Native Method)\n - waiting on <0x00000005fd3d76c8> (a org.elasticsearch.bootstrap.Bootstrap$1)\n at java.lang.Thread.join(Thread.java:1280)\n - locked <0x00000005fd3d76c8> (a org.elasticsearch.bootstrap.Bootstrap$1)\n at java.lang.Thread.join(Thread.java:1354)\n at java.lang.ApplicationShutdownHooks.runHooks(ApplicationShutdownHooks.java:106)\n at java.lang.ApplicationShutdownHooks$1.run(ApplicationShutdownHooks.java:46)\n at java.lang.Shutdown.runHooks(Shutdown.java:123)\n at java.lang.Shutdown.sequence(Shutdown.java:167)\n at java.lang.Shutdown.exit(Shutdown.java:212)\n - locked <0x00000005fd340058> (a java.lang.Class for java.lang.Shutdown)\n at java.lang.Terminator$1.handle(Terminator.java:52)\n at sun.misc.Signal$1.run(Signal.java:212)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#7]\" daemon prio=10 tid=0x00007f8e84112800 nid=0x799a waiting on condition [0x00007f8ee7c62000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at 
java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:998)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#12]\" daemon prio=10 tid=0x00007f8e8c11f800 nid=0x7999 waiting on condition [0x00007f8ee7ca3000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:998)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#3]\" daemon prio=10 tid=0x00007f8e8011e000 nid=0x7997 waiting on condition [0x00007f8ee7d25000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:998)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#2]\" daemon prio=10 tid=0x00007f8e8c11d800 nid=0x7996 waiting on condition [0x00007f8ee7d66000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at 
java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:998)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#4]\" daemon prio=10 tid=0x00007f8e84111000 nid=0x7995 waiting on condition [0x00007f8ee7da7000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:998)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#8]\" daemon prio=10 tid=0x0000000001fa0800 nid=0x7991 waiting for monitor entry [0x00007f8ee7f7c000]\n java.lang.Thread.State: BLOCKED (on object monitor)\n at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.terminated(EsThreadPoolExecutor.java:64)\n - waiting to lock <0x00000005fdbaaf50> (a java.lang.Object)\n - locked <0x00000005fae03ef0> (a org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor)\n at java.util.concurrent.ThreadPoolExecutor.tryTerminate(ThreadPoolExecutor.java:704)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:1006)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#6]\" daemon prio=10 tid=0x0000000001f9f000 nid=0x7990 waiting on condition [0x00007f8ee7fbd000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:998)\n at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"elasticsearch[AG 8][search][T#1]\" daemon prio=10 tid=0x00007f8e8410f000 nid=0x798f waiting on condition [0x00007f8ee7ffe000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:998)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"DestroyJavaVM\" prio=10 tid=0x00007f8f1000a800 nid=0x775c waiting on condition [0x0000000000000000]\n java.lang.Thread.State: RUNNABLE\n\n\"elasticsearch[AG 8][clusterService#updateTask][T#1]\" daemon prio=10 tid=0x00007f8e84107800 nid=0x7798 waiting on condition [0x00007f8eee056000]\n java.lang.Thread.State: WAITING (parking)\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.interruptIdleWorkers(ThreadPoolExecutor.java:781)\n at java.util.concurrent.ThreadPoolExecutor.tryTerminate(ThreadPoolExecutor.java:695)\n at java.util.concurrent.ThreadPoolExecutor.shutdown(ThreadPoolExecutor.java:1397)\n at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.shutdown(EsThreadPoolExecutor.java:56)\n - locked <0x00000005fdbaaf50> (a java.lang.Object)\n at org.elasticsearch.threadpool.ThreadPool.updateSettings(ThreadPool.java:395)\n at org.elasticsearch.threadpool.ThreadPool$ApplySettings.onRefreshSettings(ThreadPool.java:656)\n at org.elasticsearch.node.settings.NodeSettingsService.clusterChanged(NodeSettingsService.java:84)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:417)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:135)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\n\"Service Thread\" daemon prio=10 tid=0x00007f8f10113800 nid=0x7769 runnable [0x0000000000000000]\n java.lang.Thread.State: RUNNABLE\n\n\"C2 CompilerThread1\" daemon prio=10 tid=0x00007f8f10111000 nid=0x7768 waiting on condition [0x0000000000000000]\n java.lang.Thread.State: RUNNABLE\n\n\"C2 CompilerThread0\" daemon prio=10 tid=0x00007f8f1010e800 nid=0x7767 waiting on condition [0x0000000000000000]\n java.lang.Thread.State: RUNNABLE\n\n\"Signal Dispatcher\" daemon prio=10 tid=0x00007f8f1010c800 nid=0x7766 runnable [0x0000000000000000]\n java.lang.Thread.State: RUNNABLE\n\n\"Surrogate Locker Thread (Concurrent GC)\" daemon prio=10 tid=0x00007f8f10102000 nid=0x7765 waiting on condition [0x0000000000000000]\n java.lang.Thread.State: RUNNABLE\n\n\"Finalizer\" daemon prio=10 tid=0x00007f8f100eb800 nid=0x7764 in Object.wait() [0x00007f8f0c1bd000]\n java.lang.Thread.State: WAITING (on object monitor)\n at java.lang.Object.wait(Native Method)\n - waiting on <0x00000005fce11a08> (a java.lang.ref.ReferenceQueue$Lock)\n at java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:135)\n - locked <0x00000005fce11a08> (a java.lang.ref.ReferenceQueue$Lock)\n at java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:151)\n at java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:189)\n\n\"Reference Handler\" daemon prio=10 tid=0x00007f8f100e7800 nid=0x7763 in Object.wait() [0x00007f8f0c1fe000]\n java.lang.Thread.State: WAITING (on object monitor)\n at java.lang.Object.wait(Native Method)\n - waiting on <0x00000005fce13cb0> (a java.lang.ref.Reference$Lock)\n at java.lang.Object.wait(Object.java:503)\n at java.lang.ref.Reference$ReferenceHandler.run(Reference.java:133)\n - locked <0x00000005fce13cb0> (a java.lang.ref.Reference$Lock)\n\n\"VM Thread\" prio=10 tid=0x00007f8f100e5000 nid=0x7762 runnable \n\n\"Gang worker#0 (Parallel GC Threads)\" prio=10 tid=0x00007f8f1001c000 nid=0x775d runnable \n\n\"Gang worker#1 (Parallel GC Threads)\" prio=10 tid=0x00007f8f1001e000 nid=0x775e runnable \n\n\"Gang worker#2 (Parallel GC Threads)\" prio=10 tid=0x00007f8f1001f800 nid=0x775f runnable \n\n\"Gang worker#3 (Parallel GC Threads)\" prio=10 tid=0x00007f8f10021800 nid=0x7760 runnable \n\n\"Concurrent Mark-Sweep GC Thread\" prio=10 tid=0x00007f8f100a2000 nid=0x7761 runnable \n\"VM Periodic Task Thread\" prio=10 tid=0x00007f8f1011e800 nid=0x776a waiting on condition \n\nJNI global references: 284\n# Found one Java-level deadlock:\n\n\"Thread-1\":\n waiting for ownable synchronizer 0x00000005fdba9278, (a java.util.concurrent.locks.ReentrantLock$NonfairSync),\n which is held by \"elasticsearch[AG 8][search][T#8]\"\n\"elasticsearch[AG 8][search][T#8]\":\n waiting to lock monitor 0x00007f8e980a33a8 (object 0x00000005fdbaaf50, a java.lang.Object),\n which is held by \"elasticsearch[AG 8][clusterService#updateTask][T#1]\"\n\"elasticsearch[AG 8][clusterService#updateTask][T#1]\":\n waiting for ownable synchronizer 0x00000005fdba9278, (a java.util.concurrent.locks.ReentrantLock$NonfairSync),\n which is held by \"elasticsearch[AG 8][search][T#8]\"\n# Java stack information for the threads listed above:\n\n\"Thread-1\":\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at 
java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1461)\n at org.elasticsearch.threadpool.ThreadPool.awaitTermination(ThreadPool.java:249)\n at org.elasticsearch.node.internal.InternalNode.close(InternalNode.java:342)\n at org.elasticsearch.bootstrap.Bootstrap$1.run(Bootstrap.java:73)\n\"elasticsearch[AG 8][search][T#8]\":\n at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.terminated(EsThreadPoolExecutor.java:64)\n - waiting to lock <0x00000005fdbaaf50> (a java.lang.Object)\n - locked <0x00000005fae03ef0> (a org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor)\n at java.util.concurrent.ThreadPoolExecutor.tryTerminate(ThreadPoolExecutor.java:704)\n at java.util.concurrent.ThreadPoolExecutor.processWorkerExit(ThreadPoolExecutor.java:1006)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1163)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\"elasticsearch[AG 8][clusterService#updateTask][T#1]\":\n at sun.misc.Unsafe.park(Native Method)\n - parking to wait for <0x00000005fdba9278> (a java.util.concurrent.locks.ReentrantLock$NonfairSync)\n at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireQueued(AbstractQueuedSynchronizer.java:867)\n at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:1197)\n at java.util.concurrent.locks.ReentrantLock$NonfairSync.lock(ReentrantLock.java:214)\n at java.util.concurrent.locks.ReentrantLock.lock(ReentrantLock.java:290)\n at java.util.concurrent.ThreadPoolExecutor.interruptIdleWorkers(ThreadPoolExecutor.java:781)\n at java.util.concurrent.ThreadPoolExecutor.tryTerminate(ThreadPoolExecutor.java:695)\n at java.util.concurrent.ThreadPoolExecutor.shutdown(ThreadPoolExecutor.java:1397)\n at org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.shutdown(EsThreadPoolExecutor.java:56)\n - locked <0x00000005fdbaaf50> (a java.lang.Object)\n at org.elasticsearch.threadpool.ThreadPool.updateSettings(ThreadPool.java:395)\n at org.elasticsearch.threadpool.ThreadPool$ApplySettings.onRefreshSettings(ThreadPool.java:656)\n at org.elasticsearch.node.settings.NodeSettingsService.clusterChanged(NodeSettingsService.java:84)\n at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:417)\n at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:135)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\n\nFound 1 deadlock.\n", "comments": [ { "body": "@aganapat do you remember if you tried to 
update thread pool settings before shutting nodes down. Did you update any settings at all before shutdown? \n", "created_at": "2013-12-12T21:40:56Z" }, { "body": "I am not exactly sure but yes I was playing with update settings for threadpool.\n", "created_at": "2013-12-12T23:57:41Z" }, { "body": "Thanks for fixing this. I have a related question, I observed faster bulk insert times when I increased the threadpool size to 50 for bulk. The thread pool is of type fixed. What is your recommendation on this ? \n", "created_at": "2013-12-19T21:20:06Z" }, { "body": "It's hard to recommend something here without knowing details of your index and hardware. Moreover, we are tying to use github issues for feature requests and bug reporting. Could you ask your question on the mailing list?\n", "created_at": "2013-12-23T02:06:16Z" } ], "number": 4334, "title": "Node deadlock on shutdown" }
{ "body": "Fixes #4334\n\nThe deadlock occurs between monitor object of EsThreadPoolExecutor and mainLock of ThreadPoolExecutor. The shutdown method of EsThreadPoolExecutor obtains the lock on monitor first and waits for mainLock of ThreadPoolExecutor in ThreadPoolExecutor#shutdown for part of the processing, while EsThreadPoolExecutor#terminated is executed under mainLock and tries to obtain monitor to notify listeners.\n", "number": 4444, "review_comments": [], "title": "Resolve potential deadlock state during EsThreadPoolExecutor shutdown" }
{ "commits": [ { "message": "Resolve potential deadlock state during EsThreadPoolExecutor shutdown\n\nFixes #4334\n\nThe deadlock occurs between monitor object of EsThreadPoolExecutor and mainLock of ThreadPoolExecutor. The shutdown method of EsThreadPoolExecutor obtains the lock on monitor first and waits for mainLock of ThreadPoolExecutor in ThreadPoolExecutor#shutdown for part of the processing, while EsThreadPoolExecutor#terminated is executed under mainLock and tries to obtain monitor to notify listeners." } ], "files": [ { "diff": "@@ -53,8 +53,8 @@ public void shutdown(ShutdownListener listener) {\n } else {\n this.listener = listener;\n }\n- shutdown();\n }\n+ shutdown();\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java", "status": "modified" } ] }
{ "body": "Although this method may look convenient in order to size data-structures when a request comes in, this value can only grow due to the way it is implemented: when a new field data instance is loaded, what basically happens is something like `highestNumberOfSeenUniqueValues = max(highestNumberOfSeenUniqueValues, atomicFieldData.getNumberUniqueValues());`.\n\nSo for example if you index lots of data into Elasticsearch and then remove most of it, this value will be highly over-estimated.\n\nI think a better way to solve this issue would be to compute this information on a per-request basis by iterating over the atomic readers wrapped by the context index searcher. Although this might sound expensive, this is very likely to be very cheap compared to the query execution: there are usually less than 50 segments in an index while queries can easily match thousands or millions of documents. And it also has some advantages compared to the current approach:\n- this number will be accurate,\n- this number will only take into account segments that are wrapped by the searcher while something implemented at the index field data level could only return a number which is global to all live segments in the index.\n\nThis method isn't used currently in master so it will be easy to remove. Regarding 0.90, I plan to deprecate it in order not to break plugins that may rely on that method.\n", "comments": [], "number": 4426, "title": "Remove IndexFieldData.getHighestNumberOfSeenUniqueValues()" }
{ "body": "Closes #4426\n", "number": 4427, "review_comments": [], "title": "Remove IndexFieldData.getHighestNumberOfSeenUniqueValues()." }
{ "commits": [ { "message": "Remove IndexFieldData.getHighestNumberOfSeenUniqueValues().\n\nCloses #4426" } ], "files": [ { "diff": "@@ -9,14 +9,11 @@\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.settings.IndexSettings;\n \n-import java.util.concurrent.atomic.AtomicLong;\n-\n /**\n */\n public abstract class AbstractIndexFieldData<FD extends AtomicFieldData> extends AbstractIndexComponent implements IndexFieldData<FD> {\n \n private final FieldMapper.Names fieldNames;\n- private final AtomicLong highestUniqueValuesCount = new AtomicLong();\n protected final FieldDataType fieldDataType;\n protected final IndexFieldDataCache cache;\n \n@@ -42,16 +39,10 @@ public void clear(IndexReader reader) {\n cache.clear(reader);\n }\n \n- @Override\n- public long getHighestNumberOfSeenUniqueValues() {\n- return highestUniqueValuesCount.get();\n- }\n-\n @Override\n public final FD load(AtomicReaderContext context) {\n try {\n FD fd = cache.load(context, this);\n- updateHighestSeenValuesCount(fd.getNumberUniqueValues());\n return fd;\n } catch (Throwable e) {\n if (e instanceof ElasticSearchException) {\n@@ -62,13 +53,4 @@ public final FD load(AtomicReaderContext context) {\n }\n }\n \n- private void updateHighestSeenValuesCount(long newValuesCount) {\n- long current;\n- do {\n- if ((current = highestUniqueValuesCount.get()) >= newValuesCount) {\n- break;\n- }\n- } while (!highestUniqueValuesCount.compareAndSet(current, newValuesCount));\n- }\n-\n }", "filename": "src/main/java/org/elasticsearch/index/fielddata/AbstractIndexFieldData.java", "status": "modified" }, { "diff": "@@ -81,11 +81,6 @@ public static boolean removeOrdsOnSingleValue(FieldDataType fieldDataType) {\n \n void clear(IndexReader reader);\n \n- /**\n- * Returns the highest ever seen uniqiue values in an atomic reader.\n- */\n- long getHighestNumberOfSeenUniqueValues();\n-\n // we need this extended source we we have custom comparators to reuse our field data\n // in this case, we need to reduce type that will be used when search results are reduced\n // on another node (we don't have the custom source them...)", "filename": "src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java", "status": "modified" }, { "diff": "@@ -76,11 +76,6 @@ public void clear(IndexReader reader) {\n in.clear(reader);\n }\n \n- @Override\n- public long getHighestNumberOfSeenUniqueValues() {\n- return in.getHighestNumberOfSeenUniqueValues();\n- }\n-\n };\n }\n ", "filename": "src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java", "status": "modified" } ] }
{ "body": "I've got it working using the curl query to create a new template.\nBut when i set it up in the/templates directory it doesnt seem to get picked up at all.\nI've seen a bug raised looking similar to this. What is the work around- can i explicitly set the templates path in the config file? Also should i be able to see anything in the logs telling me this the template files are being picked up or not?\n\nAlso while looking into this issue i was following the page to see if my templates are created ok : http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-templates.html to get an output of all the loaded up templates, And i've found it doesnt seem to work unless i specify the correct template exact name. So calling curl -XGET localhost:9200/_template/temp\\* or curl -XGET localhost:9200/_template/ doesnt seem to bring back any output at all.\n\nThis was using elasticsearch-0.90.3\n", "comments": [ { "body": "Hey,\n\nit seems you have hit this https://github.com/elasticsearch/elasticsearch/issues/4235\n\nThis will be fixed in the upcoming 0.90.8 release. If you want to test, you could test elasticsearch 1.0.0beta2 or the current 0.90 branch - would be happy to get feedback\n", "created_at": "2013-12-12T08:15:08Z" }, { "body": "I found another subtle bug, the #4235 works only if you use \n\n```\n\"settings\" : { \"index.number_of_shards\" : 12 } }\n```\n\nbut does not work for the shorter version\n\n```\n\"settings\" : { \"number_of_shards\" : 12 } }\n```\n\nwill fix\n", "created_at": "2013-12-12T09:34:41Z" } ], "number": 4411, "title": "problem picking up templates in the config/templates directory" }
{ "body": "We support three different settings in templates\n- \"settings\" : { \"index\" : { \"number_of_shards\" : 12 } }\n- \"settings\" : { \"index.number_of_shards\" : 12 }\n- \"settings\" : { \"number_of_shards\" : 12 }\n\nThe latter one was not supported by the fix in #4235\n\nThis commit fixes this issue and uses randomized testing to test any of the three cases above when running integration tests.\n\nCloses #4411\n", "number": 4425, "review_comments": [], "title": "Fix parsing of file based template loading" }
{ "commits": [ { "message": "Fix parsing of file based template loading\n\nWe support three different settings in templates\n\n* \"settings\" : { \"index\" : { \"number_of_shards\" : 12 } }\n* \"settings\" : { \"index.number_of_shards\" : 12 }\n* \"settings\" : { \"number_of_shards\" : 12 }\n\nThe latter one was not supported by the fix in #4235\n\nThis commit fixes this issue and uses randomized testing to test any of the three cases above when running integration tests.\n\nCloses #4411" } ], "files": [ { "diff": "@@ -303,7 +303,15 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser) throws I\n currentFieldName = parser.currentName();\n } else if (token == XContentParser.Token.START_OBJECT) {\n if (\"settings\".equals(currentFieldName)) {\n- builder.settings(ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));\n+ ImmutableSettings.Builder templateSettingsBuilder = ImmutableSettings.settingsBuilder();\n+ for (Map.Entry<String, String> entry : SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()).entrySet()) {\n+ if (!entry.getKey().startsWith(\"index.\")) {\n+ templateSettingsBuilder.put(\"index.\" + entry.getKey(), entry.getValue());\n+ } else {\n+ templateSettingsBuilder.put(entry.getKey(), entry.getValue());\n+ }\n+ }\n+ builder.settings(templateSettingsBuilder.build());\n } else if (\"mappings\".equals(currentFieldName)) {\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {", "filename": "src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java", "status": "modified" }, { "diff": "@@ -38,7 +38,7 @@\n /**\n *\n */\n-@ClusterScope(scope= Scope.SUITE, numNodes=1)\n+@ClusterScope(scope=Scope.SUITE, numNodes=1)\n public class IndexTemplateFileLoadingTests extends ElasticsearchIntegrationTest {\n \n @Rule\n@@ -57,7 +57,8 @@ protected Settings nodeSettings(int nodeOrdinal) {\n templatesDir.mkdir();\n \n File dst = new File(templatesDir, \"template.json\");\n- String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template.json\");\n+ // random template, one uses the 'setting.index.number_of_shards', the other 'settings.number_of_shards'\n+ String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(1) + \".json\");\n Files.write(template, dst, Charsets.UTF_8);\n } catch (Exception e) {\n throw new RuntimeException(e);", "filename": "src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,7 @@\n+{\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template1.json", "status": "added" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template2.json", "status": "added" } ] }
{ "body": "Seems that with 0.90.7/master loading files via the `config/templates`directory does not work.\n\nThe problem is, that the parsed file jumps over the first while parsing, which results in the template not being set, if it is the first.\n\nGoing to send a PR in a second.\n", "comments": [ { "body": "This still doesn't seem to work for me.\n\nI've used example given in http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-templates.html#config but after restart, `curl -XGET http://localhost:9200/_template` is empty and doesn't seem to be applied.\n\nElasticsearch 0.9.10.\n", "created_at": "2014-01-17T13:51:18Z" } ], "number": 4235, "title": "Loading templates via templates/ directory is not working" }
{ "body": "We support three different settings in templates\n- \"settings\" : { \"index\" : { \"number_of_shards\" : 12 } }\n- \"settings\" : { \"index.number_of_shards\" : 12 }\n- \"settings\" : { \"number_of_shards\" : 12 }\n\nThe latter one was not supported by the fix in #4235\n\nThis commit fixes this issue and uses randomized testing to test any of the three cases above when running integration tests.\n\nCloses #4411\n", "number": 4425, "review_comments": [], "title": "Fix parsing of file based template loading" }
{ "commits": [ { "message": "Fix parsing of file based template loading\n\nWe support three different settings in templates\n\n* \"settings\" : { \"index\" : { \"number_of_shards\" : 12 } }\n* \"settings\" : { \"index.number_of_shards\" : 12 }\n* \"settings\" : { \"number_of_shards\" : 12 }\n\nThe latter one was not supported by the fix in #4235\n\nThis commit fixes this issue and uses randomized testing to test any of the three cases above when running integration tests.\n\nCloses #4411" } ], "files": [ { "diff": "@@ -303,7 +303,15 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser) throws I\n currentFieldName = parser.currentName();\n } else if (token == XContentParser.Token.START_OBJECT) {\n if (\"settings\".equals(currentFieldName)) {\n- builder.settings(ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));\n+ ImmutableSettings.Builder templateSettingsBuilder = ImmutableSettings.settingsBuilder();\n+ for (Map.Entry<String, String> entry : SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()).entrySet()) {\n+ if (!entry.getKey().startsWith(\"index.\")) {\n+ templateSettingsBuilder.put(\"index.\" + entry.getKey(), entry.getValue());\n+ } else {\n+ templateSettingsBuilder.put(entry.getKey(), entry.getValue());\n+ }\n+ }\n+ builder.settings(templateSettingsBuilder.build());\n } else if (\"mappings\".equals(currentFieldName)) {\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {", "filename": "src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java", "status": "modified" }, { "diff": "@@ -38,7 +38,7 @@\n /**\n *\n */\n-@ClusterScope(scope= Scope.SUITE, numNodes=1)\n+@ClusterScope(scope=Scope.SUITE, numNodes=1)\n public class IndexTemplateFileLoadingTests extends ElasticsearchIntegrationTest {\n \n @Rule\n@@ -57,7 +57,8 @@ protected Settings nodeSettings(int nodeOrdinal) {\n templatesDir.mkdir();\n \n File dst = new File(templatesDir, \"template.json\");\n- String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template.json\");\n+ // random template, one uses the 'setting.index.number_of_shards', the other 'settings.number_of_shards'\n+ String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template\" + randomInt(1) + \".json\");\n Files.write(template, dst, Charsets.UTF_8);\n } catch (Exception e) {\n throw new RuntimeException(e);", "filename": "src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java", "status": "modified" }, { "diff": "@@ -0,0 +1,7 @@\n+{\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template1.json", "status": "added" }, { "diff": "@@ -0,0 +1,9 @@\n+{\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index\" : {\n+ \"number_of_shards\": 10,\n+ \"number_of_replicas\": 0\n+ }\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template2.json", "status": "added" } ] }
{ "body": "When running a simple query_string search, specifying the search fields (prefixed with the type), the search returns results as expected.\nWhen running a proximity search query_string, using the same search fields, the search fails. If I remove the type prefix from the search field name, the search works.\n# EXAMPLE:\n## Setup:\n\ncurl -XPUT 'http://localhost:9201/test/product/1' -d '{ \n \"desc\": \"description of product one with something\" \n}'\ncurl -XPUT 'http://localhost:9200/test/product/2' -d '{ \n \"desc\": \"description of product two with something else\" \n}'\ncurl -XPUT 'http://localhost:9200/test/product/3' -d '{ \n \"desc\": \"description of product three with something else again\" \n}'\ncurl -XPUT 'http://localhost:9200/test/customer/1' -d '{ \n \"desc\": \"description of customer one with something\" \n}'\ncurl -XPUT 'http://localhost:9200/test/customer/2' -d '{ \n \"desc\": \"description of customer two with something else\" \n}'\ncurl -XPUT 'http://localhost:9200/test/customer/3' -d '{ \n \"desc\": \"description of customer three with something else again\" \n}'\n## Simple Search:\n\ncurl -XPOST 'http://localhost:9200/test/_search?pretty=true' -d '{\n \"query\" : {\n \"query_string\" : {\n \"query\" : \"description\",\n \"fields\" : [\"customer.desc\", \"product.desc\"]\n }\n }\n}'\n## Failing proximity search:\n\ncurl -XPOST 'http://localhost:9200/test/_search?pretty=true' -d '{\n \"query\" : {\n \"query_string\" : {\n \"query\" : \"\\\"customer else\\\"~5\",\n \"fields\" : [\"customer.desc\", \"product.desc\"]\n }\n }\n}'\n## Successful proximity search:\n\ncurl -XPOST 'http://localhost:9200/test/_search?pretty=true' -d '{\n \"query\" : {\n \"query_string\" : {\n \"query\" : \"\\\"customer else\\\"~5\",\n \"fields\" : [\"desc\"]\n }\n }\n}'\n\nWe want to be able to allow the user to search specific fields, hence prefixing the 'desc' field with either 'customer' or 'product' (our real ES instance has many 'title' and 'desc' fields, so we need to prefix these fields with the type). Is this an incorrect use of fields, or a defect with proximity search?\n\nJava: 1.7.0_45\nES: 90.5\nOS: Windows 7\n", "comments": [ { "body": "can you please ask this question as it is on the mailing list. This is a bug tracker while this seems to be a question rather than a bug. \n\nthanks,\n\nsimon\n", "created_at": "2013-12-06T13:52:54Z" }, { "body": "@s1monw Actually I've found two bugs in the `query_string` query from the above. More clearly demonstrated here:\n\n```\ncurl -XPUT \"http://localhost:9200/test/product/1\" -d'\n{\n \"desc\": \"one two three\"\n}'\n\ncurl -XPUT \"http://localhost:9200/test/customer/2\" -d'\n{\n \"desc\": \"one two three\"\n}'\n```\n\nFirst bug: Setting the default field to `customer.field` does not limit the results to just docs of type `customer`. 
No type filter is added, and both docs are returned:\n\n```\ncurl -XPOST \"http://localhost:9200/test/_search\" -d'\n{\n \"query\": {\n \"query_string\": {\n \"default_field\": \"customer.desc\",\n \"query\": \"\\\"one three\\\"~5\"\n }\n }\n}'\n```\n\nResults:\n\n```\n \"hits\": [\n {\n \"_index\": \"test\",\n \"_type\": \"customer\",\n \"_id\": \"2\",\n \"_score\": 0.2169777,\n \"_source\": {\n \"desc\": \"one two three\"\n }\n },\n {\n \"_index\": \"test\",\n \"_type\": \"product\",\n \"_id\": \"1\",\n \"_score\": 0.2169777,\n \"_source\": {\n \"desc\": \"one two three\"\n }\n }\n ]\n```\n\nWhen `customer.desc` is specified using `fields`, the type filter is correctly applied:\n\n```\ncurl -XPOST \"http://localhost:9200/test/_search\" -d'\n{\n \"query\": {\n \"query_string\": {\n \"fields\": [\n \"customer.desc\"\n ],\n \"query\": \"\\\"one two\\\"~5\"\n }\n }\n}'\n```\n\nThis returns:\n\n```\n \"hits\": [\n {\n \"_index\": \"test\",\n \"_type\": \"customer\",\n \"_id\": \"2\",\n \"_score\": 0.30685282,\n \"_source\": {\n \"desc\": \"one two three\"\n }\n }\n ]\n```\n\nHowever, when the documents _requires_ `slop` on the phrase in order to match, nothing is returned:\n\n```\ncurl -XPOST \"http://localhost:9200/test/_search\" -d'\n{\n \"query\": {\n \"query_string\": {\n \"fields\": [\n \"customer.desc\"\n ],\n \"query\": \"\\\"one three\\\"~5\"\n }\n }\n}'\n```\n", "created_at": "2013-12-06T15:01:52Z" }, { "body": "Thanks @clintongormley for reopening. I must would have missed it! Sorry for jumping to conclusion so quickly.\nI added tests and fixed the problem in the attached the commit. I will open a PR soonish\n", "created_at": "2013-12-06T21:39:44Z" } ], "number": 4356, "title": "Inconsistent search results when running proximity searches" }
{ "body": "If a phrase query is wrapped in a filtered query due to type filtering\nslop was not applied correctly. Also if the default field required a\ntype filter the filter was not applied.\n\nCloses #4356\n", "number": 4369, "review_comments": [], "title": "Apply slop correctly if phrase query is wrapped in a filtered query." }
{ "commits": [ { "message": "Apply slop correctly if phrase query is wrapped in a filtered query.\n\nIf a phrase query is wrapped in a filtered query due to type filtering\nslop was not applied correctly. Also if the default field required a\ntype filter the filter was not applied.\n\nCloses #4356" } ], "files": [ { "diff": "@@ -29,6 +29,7 @@\n import org.apache.lucene.util.automaton.RegExp;\n import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.common.lucene.search.Queries;\n+import org.elasticsearch.common.lucene.search.XFilteredQuery;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.MapperService;\n import org.elasticsearch.index.query.QueryParseContext;\n@@ -849,6 +850,9 @@ private void applyBoost(String field, Query q) {\n }\n \n private void applySlop(Query q, int slop) {\n+ if (q instanceof XFilteredQuery) {\n+ applySlop(((XFilteredQuery)q).getQuery(), slop);\n+ }\n if (q instanceof PhraseQuery) {\n ((PhraseQuery) q).setSlop(slop);\n } else if (q instanceof MultiPhraseQuery) {", "filename": "src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java", "status": "modified" }, { "diff": "@@ -125,7 +125,7 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n if (\"query\".equals(currentFieldName)) {\n qpSettings.queryString(parser.text());\n } else if (\"default_field\".equals(currentFieldName) || \"defaultField\".equals(currentFieldName)) {\n- qpSettings.defaultField(parseContext.indexName(parser.text()));\n+ qpSettings.defaultField(parser.text());\n } else if (\"default_operator\".equals(currentFieldName) || \"defaultOperator\".equals(currentFieldName)) {\n String op = parser.text();\n if (\"or\".equalsIgnoreCase(op)) {\n@@ -201,7 +201,6 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n }\n \n qpSettings.queryTypes(parseContext.queryTypes());\n-\n Query query = parseContext.indexCache().queryParserCache().get(qpSettings);\n if (query != null) {\n return query;", "filename": "src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java", "status": "modified" }, { "diff": "@@ -1704,6 +1704,36 @@ public void testMinScore() {\n assertSecondHit(searchResponse, hasId(\"1\"));\n }\n \n+ @Test\n+ public void testQueryStringWithSlopAndFields() {\n+ createIndex(\"test\");\n+ ensureGreen();\n+\n+ client().prepareIndex(\"test\", \"customer\", \"1\").setSource(\"desc\", \"one two three\").get();\n+ client().prepareIndex(\"test\", \"product\", \"2\").setSource(\"desc\", \"one two three\").get();\n+ refresh();\n+ {\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"\\\"one two\\\"\").defaultField(\"desc\")).get();\n+ assertHitCount(searchResponse, 2);\n+ }\n+ {\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"\\\"one two\\\"\").field(\"product.desc\")).get();\n+ assertHitCount(searchResponse, 1);\n+ }\n+ {\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"\\\"one three\\\"~5\").field(\"product.desc\")).get();\n+ assertHitCount(searchResponse, 1);\n+ }\n+ {\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"\\\"one two\\\"\").defaultField(\"customer.desc\")).get();\n+ assertHitCount(searchResponse, 1);\n+ }\n+ {\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"\\\"one 
two\\\"\").defaultField(\"customer.desc\")).get();\n+ assertHitCount(searchResponse, 1);\n+ }\n+ }\n+\n private static FilterBuilder rangeFilter(String field, Object from, Object to) {\n if (randomBoolean()) {\n if (randomBoolean()) {", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
{ "body": "A bunch of ways to highlight boost phrase matches over general term matches in score order highlighted fragments don't work. I don't have time to make a curl recreation at the moment but I'll send a pull request with failing tests that I _think_ should all pass. Note: they all pass for the postings highlighter, but the other two don't have it together.\n", "comments": [ { "body": "I'll have a look at getting these to not fail at some point....\n", "created_at": "2013-12-05T20:03:29Z" } ], "number": 4351, "title": "A bunch of ways to highlight boost phrase matches over general term matches in score order highlighted fragments don't work" }
{ "body": "The FVH was throwing away some boosts on queries stopping a number of\nways to boost phrase matches to the top of the list of fragments from\nworking.\n\nThe plain highlighter also doesn't work for this but that is because it\ndoesn't support the concept of terms at different positions having different\nweights.\n\nCloses #4351\n", "number": 4352, "review_comments": [ { "body": "I think these blank lines made the file easier to read?\n", "created_at": "2014-01-07T19:17:22Z" }, { "body": "Could we have an assertion that the Lucene version is 4.6 so that we don't forget to remove this code when Lucene gets fixed? Something like `assert Lucene.VERSION == Version.LUCENE_46 : \"LUCENE-5361\"`\n", "created_at": "2014-01-07T19:28:22Z" }, { "body": "Since this file only has reformatted imports, maybe we could reset it to what it is in master?\n", "created_at": "2014-01-07T19:29:57Z" }, { "body": "I'm not really sure why I removed those.....\n", "created_at": "2014-01-07T20:50:03Z" }, { "body": "Sure.\n", "created_at": "2014-01-07T20:50:26Z" }, { "body": "Done.\n", "created_at": "2014-01-07T20:50:40Z" } ], "title": "Stop FVH from throwing away some query boosts" }
{ "commits": [ { "message": "Stop FVH from throwing away some query boosts\n\nThe FVH was throwing away some boosts on queries stopping a number of\nways to boost phrase matches to the top of the list of fragments from\nworking.\n\nThe plain highlighter also doesn't work for this but that is because it\ndoesn't support the concept of the same term having a different score at\ndifferent positions.\n\nAlso update documentation claiming that FHV is nicer for weighing terms\nfound by query combinations.\n\nCloses #4351" } ], "files": [ { "diff": "@@ -77,6 +77,9 @@ will be used instead of the plain highlighter. The fast vector highlighter:\n increases the size of the index\n * Can combine matches from multiple fields into one result. See\n `matched_fields`\n+* Can assign different weights to matches at different positions allowing\n+ for things like phrase matches being sorted above term matches when\n+ highlighting a Boosting Query that boosts phrase matches over term matches\n \n Here is an example of setting the `content` field to allow for\n highlighting using the fast vector highlighter on it (this will cause", "filename": "docs/reference/search/request/highlighting.asciidoc", "status": "modified" }, { "diff": "@@ -25,6 +25,8 @@\n import org.apache.lucene.queries.TermFilter;\n import org.apache.lucene.search.*;\n import org.apache.lucene.search.spans.SpanTermQuery;\n+import org.apache.lucene.util.Version;\n+import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;\n import org.elasticsearch.common.lucene.search.XBooleanFilter;\n import org.elasticsearch.common.lucene.search.XFilteredQuery;\n@@ -66,10 +68,36 @@ public CustomFieldQuery(Query query, IndexReader reader, boolean phraseHighlight\n \n @Override\n void flatten(Query sourceQuery, IndexReader reader, Collection<Query> flatQueries) throws IOException {\n- if (sourceQuery instanceof DisjunctionMaxQuery) {\n+ assert Lucene.VERSION == Version.LUCENE_46 : \"LUCENE-5361\";\n+ if( sourceQuery instanceof BooleanQuery ){\n+ BooleanQuery bq = (BooleanQuery)sourceQuery;\n+ if (bq.getBoost() == 1) {\n+ for( BooleanClause clause : bq.getClauses() ) {\n+ if(!clause.isProhibited()) {\n+ flatten(clause.getQuery(), reader, flatQueries);\n+ }\n+ }\n+ } else {\n+ for( BooleanClause clause : bq.getClauses() ) {\n+ if(!clause.isProhibited()) {\n+ Query cloned = clause.getQuery().clone();\n+ cloned.setBoost(cloned.getBoost() * bq.getBoost());\n+ flatten(cloned, reader, flatQueries);\n+ }\n+ }\n+ }\n+ } else if (sourceQuery instanceof DisjunctionMaxQuery) {\n DisjunctionMaxQuery dmq = (DisjunctionMaxQuery) sourceQuery;\n- for (Query query : dmq) {\n- flatten(query, reader, flatQueries);\n+ if (dmq.getBoost() == 1) {\n+ for (Query query : dmq) {\n+ flatten(query, reader, flatQueries);\n+ }\n+ } else {\n+ for (Query query : dmq) {\n+ Query clone = query.clone();\n+ clone.setBoost(clone.getBoost() * dmq.getBoost());\n+ flatten(clone, reader, flatQueries);\n+ }\n }\n } else if (sourceQuery instanceof SpanTermQuery) {\n TermQuery termQuery = new TermQuery(((SpanTermQuery) sourceQuery).getTerm());", "filename": "src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java", "status": "modified" }, { "diff": "@@ -2469,4 +2469,82 @@ public void testPlainHighlighterCustomIndexName() {\n .addHighlightedField(\"field1\").setHighlighterRequireFieldMatch(true).get();\n assertHighlight(searchResponse, 0, \"field1\", 0, 1, equalTo(\"<em>First</em> sentence. 
Second sentence.\"));\n }\n+\n+ @Test\n+ public void testFastVectorHighlighterPhraseBoost() throws Exception {\n+ assertAcked(client().admin().indices().prepareCreate(\"test\").addMapping(\"type1\", type1TermVectorMapping()));\n+ phraseBoostTestCase(\"fvh\");\n+ }\n+\n+ @Test\n+ public void testPostingsHighlighterPhraseBoost() throws Exception {\n+ assertAcked(client().admin().indices().prepareCreate(\"test\").addMapping(\"type1\", type1PostingsffsetsMapping()));\n+ phraseBoostTestCase(\"postings\");\n+ }\n+\n+ /**\n+ * Test phrase boosting over normal term matches. Note that this will never pass with the plain highlighter\n+ * because it doesn't support the concept of terms having a different weight based on position.\n+ * @param highlighterType highlighter to test\n+ */\n+ private void phraseBoostTestCase(String highlighterType) {\n+ ensureGreen();\n+ StringBuilder text = new StringBuilder();\n+ text.append(\"words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\\n\");\n+ for (int i = 0; i<10; i++) {\n+ text.append(\"junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\\n\");\n+ }\n+ text.append(\"highlight words together\\n\");\n+ for (int i = 0; i<10; i++) {\n+ text.append(\"junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\\n\");\n+ }\n+ index(\"test\", \"type1\", \"1\", \"field1\", text.toString());\n+ refresh();\n+\n+ // Match queries\n+ phraseBoostTestCaseForClauses(highlighterType, 100f,\n+ matchQuery(\"field1\", \"highlight words together\"),\n+ matchPhraseQuery(\"field1\", \"highlight words together\"));\n+\n+ // Query string with a single field\n+ phraseBoostTestCaseForClauses(highlighterType, 100f,\n+ queryString(\"highlight words together\").field(\"field1\"),\n+ queryString(\"\\\"highlight words together\\\"\").field(\"field1\").autoGeneratePhraseQueries(true));\n+\n+ // Query string with a single field without dismax\n+ phraseBoostTestCaseForClauses(highlighterType, 100f,\n+ queryString(\"highlight words together\").field(\"field1\").useDisMax(false),\n+ queryString(\"\\\"highlight words together\\\"\").field(\"field1\").useDisMax(false).autoGeneratePhraseQueries(true));\n+\n+ // Query string with more than one field\n+ phraseBoostTestCaseForClauses(highlighterType, 100f,\n+ queryString(\"highlight words together\").field(\"field1\").field(\"field2\"),\n+ queryString(\"\\\"highlight words together\\\"\").field(\"field1\").field(\"field2\").autoGeneratePhraseQueries(true));\n+\n+ // Query string boosting the field\n+ phraseBoostTestCaseForClauses(highlighterType, 1f,\n+ queryString(\"highlight words together\").field(\"field1\"),\n+ queryString(\"\\\"highlight words together\\\"\").field(\"field1^100\").autoGeneratePhraseQueries(true));\n+ }\n+\n+ private <P extends QueryBuilder & BoostableQueryBuilder> void\n+ phraseBoostTestCaseForClauses(String highlighterType, float boost, QueryBuilder terms, P phrase) {\n+ Matcher<String> highlightedMatcher = either(containsString(\"<em>highlight words together</em>\")).or(\n+ containsString(\"<em>highlight</em> <em>words</em> <em>together</em>\"));\n+ SearchRequestBuilder search = client().prepareSearch(\"test\").setHighlighterRequireFieldMatch(true)\n+ .setHighlighterOrder(\"score\").setHighlighterType(highlighterType)\n+ .addHighlightedField(\"field1\", 100, 1);\n+\n+ // Try with a bool query\n+ phrase.boost(boost);\n+ SearchResponse response = 
search.setQuery(boolQuery().must(terms).should(phrase)).get();\n+ assertHighlight(response, 0, \"field1\", 0, 1, highlightedMatcher);\n+ phrase.boost(1);\n+ // Try with a boosting query\n+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(boost).negativeBoost(1)).get();\n+ assertHighlight(response, 0, \"field1\", 0, 1, highlightedMatcher);\n+ // Try with a boosting query using a negative boost\n+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(1).negativeBoost(1/boost)).get();\n+ assertHighlight(response, 0, \"field1\", 0, 1, highlightedMatcher);\n+ }\n }", "filename": "src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java", "status": "modified" } ] }
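The core of the FVH change is that flattening the query tree must not drop boosts set on enclosing BooleanQuery or DisjunctionMaxQuery instances. A toy, self-contained version of boost-preserving flattening (assuming the mutable Lucene 4.x query API; the real logic lives in CustomFieldQuery):

```
import java.util.ArrayList;
import java.util.Collection;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

// Illustrative only: the parent boost is multiplied onto cloned children so
// fragment scoring still sees it after the tree is flattened to leaf queries.
public final class BoostPreservingFlattener {

    private BoostPreservingFlattener() {}

    public static void flatten(Query query, Collection<Query> flat) {
        if (query instanceof BooleanQuery) {
            BooleanQuery bq = (BooleanQuery) query;
            for (BooleanClause clause : bq.getClauses()) {
                if (clause.isProhibited()) {
                    continue; // MUST_NOT clauses never contribute highlight terms
                }
                Query child = clause.getQuery().clone();
                child.setBoost(child.getBoost() * bq.getBoost()); // keep the parent boost
                flatten(child, flat);
            }
        } else {
            flat.add(query);
        }
    }

    public static void main(String[] args) {
        BooleanQuery bq = new BooleanQuery();
        bq.add(new TermQuery(new Term("field1", "highlight")), Occur.SHOULD);
        bq.setBoost(100f);
        Collection<Query> flat = new ArrayList<>();
        flatten(bq, flat);
        System.out.println(flat.iterator().next().getBoost()); // 100.0
    }
}
```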
{ "body": "The implementation of `currentValueHash` in FieldDataSource.Bytes.SortedAndUnique is not correct and returns hash codes that are always equal to 0, which makes BytesRefHash lookup perform in linear time instead of constant time.\n", "comments": [], "number": 4330, "title": "FieldDataSource.Bytes.SortedAndUnique returns wrong hashes" }
{ "body": "Close #4330\n", "number": 4331, "review_comments": [], "title": "Fix implementation of currentValueHash in FieldDataSource.Bytes.SortedAndUnique" }
{ "commits": [ { "message": "Fix implementation of currentValueHash in FieldDataSource.Bytes.SortedAndUnique.\n\nClose #4330" } ], "files": [ { "diff": "@@ -79,6 +79,7 @@ public void collect(int doc, long owningBucketOrdinal) throws IOException {\n continue;\n }\n final int hash = values.currentValueHash();\n+ assert hash == bytes.hashCode();\n int bucketOrdinal = bucketOrds.add(bytes, hash);\n if (bucketOrdinal < 0) { // already seen\n bucketOrdinal = - 1 - bucketOrdinal;", "filename": "src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java", "status": "modified" }, { "diff": "@@ -166,7 +166,7 @@ public int setDocument(int docId) {\n bytes.clear();\n bytes.reinit();\n for (int i = 0; i < numValues; ++i) {\n- bytes.add(super.nextValue(), super.hashCode());\n+ bytes.add(super.nextValue(), super.currentValueHash());\n }\n numUniqueValues = bytes.size();\n sortedIds = bytes.sort(BytesRef.getUTF8SortedAsUnicodeComparator());\n@@ -180,6 +180,11 @@ public BytesRef nextValue() {\n return spare;\n }\n \n+ @Override\n+ public int currentValueHash() {\n+ return spare.hashCode();\n+ }\n+\n @Override\n public Order getOrder() {\n return Order.BYTES;", "filename": "src/main/java/org/elasticsearch/search/aggregations/support/FieldDataSource.java", "status": "modified" } ] }
{ "body": "The _all field uses payloads in order to be able to store per-field boosts in a single index field. However, the way it is implemented relies on the fact that the token stream doesn't eagerly consume the input `java.io.Reader` (see `AllEntries.read`). So in practice, boost on the _all field doesn't work when under any of these circumstances:\n- there is a char filter,\n- the tokenizer is not the `standard` tokenizer,\n- any token filter has read-ahead logic.\n", "comments": [ { "body": "Could you also consider a wider scope of\n1. Per field boost in multified see #4108\n2. Infrastructure for boosting fragments of input text at index time. This would allow to have some sort of markup in the indexed json to supply boost to fragments of text. Common use case is finding and boosting fragments of importance as a part of indexing\n", "created_at": "2013-12-02T22:37:12Z" }, { "body": "@roytmana The two issues you are mentioning are actually quite tough to implement, so I would like to concentrate on just fixing boosting on the _all field for now.\n", "created_at": "2013-12-03T11:02:53Z" }, { "body": "@jpountz isn't #1 quite similar to _all?\nI understand _all is searched in a special way taking per field boosts stored as postings into account. Could not the same to be done for multifields?\n", "created_at": "2013-12-03T14:38:17Z" }, { "body": "@roytmana a similar method could be applied indeed. But I'm not fully happy with the way per-field boosting works for the _all field so I would like that we consider improving it before applying the same logic to other places. In particular, this doesn't work with all queries (eg. phrase queries) and is quite wasteful storage-wise (4 bytes per occurrence of a term whose field has a boost which is not 1: I wouldn't be surprised to see that it sometimes almost doubles the size of the inverted index for the _all field).\n", "created_at": "2013-12-03T14:48:32Z" }, { "body": "@jpountz Great thank you for the info. I just wanted to bring these two cases up so you could consider them as you work on _all implementation. Hopefully multifield will follow soon :-) and an arbitrary snippet boosting after that \n", "created_at": "2013-12-03T14:57:35Z" }, { "body": "@jpountz do you mind if I create another ticket with expanded scope as discussed in my first reply toy your post as I feel ability to boos individual text fragments and particularly multifields is very powerful feature?\nOr maybe you would rather write it up yourself?\n", "created_at": "2013-12-05T17:48:50Z" }, { "body": "@roytmana please open a ticket. I do think the ability to boost individual text fragments is very interesting!\n", "created_at": "2013-12-05T23:03:55Z" } ], "number": 4315, "title": "Per-field boosting of the _all field is broken unless very specific conditions are met" }
{ "body": "_all boosting used to rely on the fact that the TokenStream doesn't eagerly\nconsume the input java.io.Reader. This fixes the issue by using binary search\nin order to find the right boost given a token's start offset.\n\nClose #4315\n", "number": 4326, "review_comments": [ { "body": "I've never really liked references to github issues. I'm of the mind that if the test is well enough named there isn't really a point and if you really need to trace it back to an issue you can `git blame` the file.\n", "created_at": "2013-12-03T16:12:42Z" }, { "body": "I guess I'm doing it because I'm very bad at picking names. :-) I got your point and will remove the comment when pushing.\n", "created_at": "2013-12-03T23:45:29Z" } ], "title": "Fix _all boosting." }
{ "commits": [ { "message": "Fix _all boosting.\n\n_all boosting used to rely on the fact that the TokenStream doesn't eagerly\nconsume the input java.io.Reader. This fixes the issue by using binary search\nin order to find the right boost given a token's start offset.\n\nClose #4315" } ], "files": [ { "diff": "@@ -40,14 +40,20 @@ public class AllEntries extends Reader {\n public static class Entry {\n private final String name;\n private final FastStringReader reader;\n+ private final int startOffset;\n private final float boost;\n \n- public Entry(String name, FastStringReader reader, float boost) {\n+ public Entry(String name, FastStringReader reader, int startOffset, float boost) {\n this.name = name;\n this.reader = reader;\n+ this.startOffset = startOffset;\n this.boost = boost;\n }\n \n+ public int startOffset() {\n+ return startOffset;\n+ }\n+\n public String name() {\n return this.name;\n }\n@@ -75,7 +81,15 @@ public void addText(String name, String text, float boost) {\n if (boost != 1.0f) {\n customBoost = true;\n }\n- Entry entry = new Entry(name, new FastStringReader(text), boost);\n+ final int lastStartOffset;\n+ if (entries.isEmpty()) {\n+ lastStartOffset = -1;\n+ } else {\n+ final Entry last = entries.get(entries.size() - 1);\n+ lastStartOffset = last.startOffset() + last.reader().length();\n+ }\n+ final int startOffset = lastStartOffset + 1; // +1 because we insert a space between tokens\n+ Entry entry = new Entry(name, new FastStringReader(text), startOffset, boost);\n entries.add(entry);\n }\n \n@@ -129,8 +143,22 @@ public Set<String> fields() {\n return fields;\n }\n \n- public Entry current() {\n- return this.current;\n+ // compute the boost for a token with the given startOffset\n+ public float boost(int startOffset) {\n+ int lo = 0, hi = entries.size() - 1;\n+ while (lo <= hi) {\n+ final int mid = (lo + hi) >>> 1;\n+ final int midOffset = entries.get(mid).startOffset();\n+ if (startOffset < midOffset) {\n+ hi = mid - 1;\n+ } else {\n+ lo = mid + 1;\n+ }\n+ }\n+ final int index = Math.max(0, hi); // protection against broken token streams\n+ assert entries.get(index).startOffset() <= startOffset;\n+ assert index == entries.size() - 1 || entries.get(index + 1).startOffset() > startOffset;\n+ return entries.get(index).boost();\n }\n \n @Override\n@@ -186,7 +214,7 @@ public int read(char[] cbuf, int off, int len) throws IOException {\n @Override\n public void close() {\n if (current != null) {\n- current.reader().close();\n+ // no need to close, these are readers on strings\n current = null;\n }\n }", "filename": "src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.apache.lucene.analysis.Analyzer;\n import org.apache.lucene.analysis.TokenFilter;\n import org.apache.lucene.analysis.TokenStream;\n+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;\n import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;\n import org.apache.lucene.util.BytesRef;\n \n@@ -42,11 +43,13 @@ public static TokenStream allTokenStream(String allFieldName, AllEntries allEntr\n \n private final AllEntries allEntries;\n \n+ private final OffsetAttribute offsetAttribute;\n private final PayloadAttribute payloadAttribute;\n \n AllTokenStream(TokenStream input, AllEntries allEntries) {\n super(input);\n this.allEntries = allEntries;\n+ offsetAttribute = addAttribute(OffsetAttribute.class);\n payloadAttribute = addAttribute(PayloadAttribute.class);\n }\n \n@@ -59,14 +62,12 @@ public final boolean 
incrementToken() throws IOException {\n if (!input.incrementToken()) {\n return false;\n }\n- if (allEntries.current() != null) {\n- float boost = allEntries.current().boost();\n- if (boost != 1.0f) {\n- encodeFloat(boost, payloadSpare.bytes, payloadSpare.offset);\n- payloadAttribute.setPayload(payloadSpare);\n- } else {\n- payloadAttribute.setPayload(null);\n- }\n+ final float boost = allEntries.boost(offsetAttribute.startOffset());\n+ if (boost != 1.0f) {\n+ encodeFloat(boost, payloadSpare.bytes, payloadSpare.offset);\n+ payloadAttribute.setPayload(payloadSpare);\n+ } else {\n+ payloadAttribute.setPayload(null);\n }\n return true;\n }", "filename": "src/main/java/org/elasticsearch/common/lucene/all/AllTokenStream.java", "status": "modified" }, { "diff": "@@ -19,6 +19,11 @@\n \n package org.elasticsearch.common.lucene.all;\n \n+import org.apache.lucene.analysis.TokenStream;\n+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;\n+import org.apache.lucene.analysis.payloads.PayloadHelper;\n+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;\n+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;\n import org.apache.lucene.document.Document;\n import org.apache.lucene.document.Field;\n import org.apache.lucene.document.StoredField;\n@@ -27,6 +32,7 @@\n import org.apache.lucene.search.*;\n import org.apache.lucene.store.Directory;\n import org.apache.lucene.store.RAMDirectory;\n+import org.apache.lucene.util.BytesRef;\n import org.elasticsearch.common.lucene.Lucene;\n import org.elasticsearch.test.ElasticsearchTestCase;\n import org.junit.Test;\n@@ -40,6 +46,52 @@\n */\n public class SimpleAllTests extends ElasticsearchTestCase {\n \n+ @Test\n+ // https://github.com/elasticsearch/elasticsearch/issues/4315\n+ public void testBoostOnEagerTokenizer() throws Exception {\n+ AllEntries allEntries = new AllEntries();\n+ allEntries.addText(\"field1\", \"all\", 2.0f);\n+ allEntries.addText(\"field2\", \"your\", 1.0f);\n+ allEntries.addText(\"field1\", \"boosts\", 0.5f);\n+ allEntries.reset();\n+ // whitespace analyzer's tokenizer reads characters eagerly on the contrary to the standard tokenizer\n+ final TokenStream ts = AllTokenStream.allTokenStream(\"any\", allEntries, new WhitespaceAnalyzer(Lucene.VERSION));\n+ final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);\n+ final PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);\n+ ts.reset();\n+ for (int i = 0; i < 3; ++i) {\n+ assertTrue(ts.incrementToken());\n+ final String term;\n+ final float boost;\n+ switch (i) {\n+ case 0:\n+ term = \"all\";\n+ boost = 2;\n+ break;\n+ case 1:\n+ term = \"your\";\n+ boost = 1;\n+ break;\n+ case 2:\n+ term = \"boosts\";\n+ boost = 0.5f;\n+ break;\n+ default:\n+ throw new AssertionError();\n+ }\n+ assertEquals(term, termAtt.toString());\n+ final BytesRef payload = payloadAtt.getPayload();\n+ if (payload == null || payload.length == 0) {\n+ assertEquals(boost, 1f, 0.001f);\n+ } else {\n+ assertEquals(4, payload.length);\n+ final float b = PayloadHelper.decodeFloat(payload.bytes, payload.offset);\n+ assertEquals(boost, b, 0.001f);\n+ }\n+ }\n+ assertFalse(ts.incrementToken());\n+ }\n+\n @Test\n public void testAllEntriesRead() throws Exception {\n AllEntries allEntries = new AllEntries();", "filename": "src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java", "status": "modified" } ] }
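The replacement mechanism in #4326 can be shown in isolation: each field appended to _all remembers the start offset of its first character in the concatenated text (fields are joined with a single space), and a token's boost is found by binary-searching for the last entry whose start offset does not exceed the token's start offset. The class below is a hypothetical stand-alone version of that lookup, not the AllEntries implementation itself:

```
// Stand-alone sketch of the offset-to-boost lookup used for _all boosting.
public final class AllFieldBoostLookup {

    private final int[] startOffsets; // sorted ascending, one per appended field
    private final float[] boosts;

    public AllFieldBoostLookup(int[] startOffsets, float[] boosts) {
        this.startOffsets = startOffsets;
        this.boosts = boosts;
    }

    public float boost(int tokenStartOffset) {
        int lo = 0, hi = startOffsets.length - 1;
        while (lo <= hi) {
            int mid = (lo + hi) >>> 1;
            if (tokenStartOffset < startOffsets[mid]) {
                hi = mid - 1;
            } else {
                lo = mid + 1;
            }
        }
        return boosts[Math.max(0, hi)]; // hi is the last entry starting at or before the token
    }

    public static void main(String[] args) {
        // Concatenated text "all your boosts": "all" (boost 2.0) starts at offset 0,
        // "your" (1.0) at 4, "boosts" (0.5) at 9.
        AllFieldBoostLookup lookup = new AllFieldBoostLookup(new int[]{0, 4, 9}, new float[]{2f, 1f, 0.5f});
        System.out.println(lookup.boost(0)); // 2.0
        System.out.println(lookup.boost(4)); // 1.0
        System.out.println(lookup.boost(9)); // 0.5
    }
}
```

Because the lookup depends only on token offsets, it no longer matters whether the tokenizer consumes the underlying Reader eagerly, which was the failure mode described in #4315.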
{ "body": "To reproduce \n- set `cluster.routing.allocation.node_initial_primaries_recoveries` to any value lower than `cluster.routing.allocation.node_concurrent_recoveries`\n- start cluster with two nodes (node1 and node2)\n- create an index with 1000 shards and 0 replicas:\n\n```\ncurl -XPUT localhost:9200/test -d '{\"settings\": {\"number_of_shards\":1000, \"number_of_replicas\":0}}' \n```\n- shutdown node2\n- enable allocation filtering to exclude node1:\n\n```\ncurl -XPUT localhost:9200/_cluster/settings -d '{\"transient\": {\"cluster.routing.allocation.exclude._id\": \"...id of node1...\"}}'\n```\n- at this time half of the shards should be `STARTED` and another half should be `UNASSIGNED`\n- start node2\n- observe that instead of initializing `node_initial_primaries_recoveries` shards on node2 first, elasticsearch is moving shards from node1 to node2\n\nExpected behavior: until all local shards are initialized, all `node_initial_primaries_recoveries` shards should be initializing locally and the rest (`node_concurrent_recoveries` - `node_initial_primaries_recoveries`) can be used for relocation.\n\nImpact: as a result of this bug, sometimes relocating primaries from another node can take over local recovery and a cluster may take very long time to get to green status\n", "comments": [], "number": 4237, "title": "During node startup local primaries should be preferred to relocating primaries" }
{ "body": "To reproduce the bug use -Dtests.seed=5AB62524C9AB0489\nFixes #4237\n", "number": 4238, "review_comments": [ { "body": "can we add a comment here on what it fixes?\n", "created_at": "2013-11-24T20:20:21Z" } ], "title": "Local primaries should be preferred to relocating primaries" }
{ "commits": [ { "message": "Local primaries should be preferred to relocating primaries\n\nTo reproduce the bug use -Dtests.seed=5AB62524C9AB0489\nFixes #4237" } ], "files": [ { "diff": "@@ -82,7 +82,9 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing\n List<MutableShardRouting> shards = node.shards();\n for (int i = 0; i < shards.size(); i++) {\n MutableShardRouting shard = shards.get(i);\n- if (shard.state() == ShardRoutingState.INITIALIZING && shard.primary()) {\n+ // when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node*\n+ // we only count initial recoveries here, so we need to make sure that relocating node is null\n+ if (shard.state() == ShardRoutingState.INITIALIZING && shard.primary() && shard.relocatingNodeId() == null) {\n primariesInRecovery++;\n }\n }", "filename": "src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java", "status": "modified" }, { "diff": "@@ -0,0 +1,125 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.cluster.routing.allocation;\n+\n+import com.google.common.collect.ImmutableMap;\n+import org.elasticsearch.cluster.ClusterState;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n+import org.elasticsearch.cluster.metadata.MetaData;\n+import org.elasticsearch.cluster.node.DiscoveryNodes;\n+import org.elasticsearch.cluster.routing.MutableShardRouting;\n+import org.elasticsearch.cluster.routing.RoutingTable;\n+import org.elasticsearch.test.ElasticsearchTestCase;\n+import org.junit.Test;\n+\n+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;\n+import static org.elasticsearch.cluster.routing.allocation.RoutingAllocationTests.newNode;\n+import static org.elasticsearch.cluster.routing.allocation.RoutingAllocationTests.startRandomInitializingShard;\n+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n+import static org.hamcrest.Matchers.equalTo;\n+\n+/**\n+ */\n+public class PreferLocalPrimariesToRelocatingPrimariesTests extends ElasticsearchTestCase {\n+ @Test\n+ public void testPreferLocalPrimaryAllocationOverFiltered() {\n+ int concurrentRecoveries = randomIntBetween(1, 10);\n+ int primaryRecoveries = randomIntBetween(1, 10);\n+ int numberOfShards = randomIntBetween(5, 20);\n+ int totalNumberOfShards = numberOfShards * 2;\n+\n+ logger.info(\"create an allocation with [{}] initial primary recoveries and [{}] concurrent recoveries\", primaryRecoveries, concurrentRecoveries);\n+ AllocationService strategy = new AllocationService(settingsBuilder()\n+ .put(\"cluster.routing.allocation.node_concurrent_recoveries\", concurrentRecoveries)\n+ 
.put(\"cluster.routing.allocation.node_initial_primaries_recoveries\", primaryRecoveries)\n+ .build());\n+\n+ logger.info(\"create 2 indices with [{}] no replicas, and wait till all are allocated\", numberOfShards);\n+\n+ MetaData metaData = MetaData.builder()\n+ .put(IndexMetaData.builder(\"test1\").numberOfShards(numberOfShards).numberOfReplicas(0))\n+ .put(IndexMetaData.builder(\"test2\").numberOfShards(numberOfShards).numberOfReplicas(0))\n+ .build();\n+\n+ RoutingTable routingTable = RoutingTable.builder()\n+ .addAsNew(metaData.index(\"test1\"))\n+ .addAsNew(metaData.index(\"test2\"))\n+ .build();\n+\n+ ClusterState clusterState = ClusterState.builder().metaData(metaData).routingTable(routingTable).build();\n+\n+ logger.info(\"adding two nodes and performing rerouting till all are allocated\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()\n+ .put(newNode(\"node1\", ImmutableMap.of(\"tag1\", \"value1\")))\n+ .put(newNode(\"node2\", ImmutableMap.of(\"tag1\", \"value2\")))).build();\n+\n+ routingTable = strategy.reroute(clusterState).routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n+ while (!clusterState.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {\n+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ }\n+\n+ logger.info(\"remove one of the nodes and apply filter to move everything from another node\");\n+\n+ metaData = MetaData.builder()\n+ .put(IndexMetaData.builder(\"test1\").settings(settingsBuilder()\n+ .put(\"index.number_of_shards\", numberOfShards)\n+ .put(\"index.number_of_replicas\", 0)\n+ .put(\"index.routing.allocation.exclude.tag1\", \"value2\")\n+ .build()))\n+ .put(IndexMetaData.builder(\"test2\").settings(settingsBuilder()\n+ .put(\"index.number_of_shards\", numberOfShards)\n+ .put(\"index.number_of_replicas\", 0)\n+ .put(\"index.routing.allocation.exclude.tag1\", \"value2\")\n+ .build()))\n+ .build();\n+ clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(\"node1\")).build();\n+ routingTable = strategy.reroute(clusterState).routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n+ logger.info(\"[{}] primaries should be still started but [{}] other primaries should be unassigned\", numberOfShards, numberOfShards);\n+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(numberOfShards));\n+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));\n+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(numberOfShards));\n+\n+ logger.info(\"start node back up\");\n+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())\n+ .put(newNode(\"node1\", ImmutableMap.of(\"tag1\", \"value1\")))).build();\n+ routingTable = strategy.reroute(clusterState).routingTable();\n+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();\n+\n+ while (clusterState.routingNodes().shardsWithState(STARTED).size() < totalNumberOfShards) {\n+ int localInitializations = 0;\n+ int relocatingInitializations = 0;\n+ for (MutableShardRouting routing : clusterState.routingNodes().shardsWithState(INITIALIZING)) {\n+ if 
(routing.relocatingNodeId() == null) {\n+ localInitializations++;\n+ } else {\n+ relocatingInitializations++;\n+ }\n+ }\n+ int needToInitialize = totalNumberOfShards - clusterState.routingNodes().shardsWithState(STARTED).size() - clusterState.routingNodes().shardsWithState(RELOCATING).size();\n+ logger.info(\"local initializations: [{}], relocating: [{}], need to initialize: {}\", localInitializations, relocatingInitializations, needToInitialize);\n+ assertThat(localInitializations, equalTo(Math.min(primaryRecoveries, needToInitialize)));\n+ clusterState = startRandomInitializingShard(clusterState, strategy);\n+ }\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java", "status": "added" }, { "diff": "@@ -20,14 +20,21 @@\n package org.elasticsearch.cluster.routing.allocation;\n \n import org.elasticsearch.Version;\n+import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.node.DiscoveryNode;\n+import org.elasticsearch.cluster.routing.MutableShardRouting;\n+import org.elasticsearch.cluster.routing.RoutingTable;\n import org.elasticsearch.common.transport.DummyTransportAddress;\n import org.elasticsearch.common.transport.TransportAddress;\n import org.elasticsearch.test.ElasticsearchTestCase;\n import org.junit.Ignore;\n \n+import java.util.List;\n import java.util.Map;\n \n+import static com.google.common.collect.Lists.newArrayList;\n+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;\n+\n @Ignore(\"Not a test\")\n public class RoutingAllocationTests extends ElasticsearchTestCase {\n \n@@ -42,4 +49,14 @@ public static DiscoveryNode newNode(String nodeId, TransportAddress address) {\n public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {\n return new DiscoveryNode(\"\", nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);\n }\n+\n+ public static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {\n+ List<MutableShardRouting> initializingShards = clusterState.routingNodes().shardsWithState(INITIALIZING);\n+ if (initializingShards.isEmpty()) {\n+ return clusterState;\n+ }\n+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, newArrayList(initializingShards.get(randomInt(initializingShards.size() - 1)))).routingTable();\n+ return ClusterState.builder(clusterState).routingTable(routingTable).build();\n+ }\n+\n }\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocationTests.java", "status": "modified" } ] }
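The counting change in #4238 boils down to a single predicate: an INITIALIZING primary counts against cluster.routing.allocation.node_initial_primaries_recoveries only when it is a fresh local recovery (relocatingNodeId == null), not when it is relocating in from another node. A toy model of that predicate follows; the Shard class is a hypothetical stand-in for MutableShardRouting.

```
import java.util.Arrays;
import java.util.List;

// Illustrative only: models the ThrottlingAllocationDecider counting rule.
public final class InitialPrimaryRecoveryCount {

    static final class Shard {
        final boolean primary;
        final boolean initializing;
        final String relocatingNodeId; // null when this is a fresh local recovery

        Shard(boolean primary, boolean initializing, String relocatingNodeId) {
            this.primary = primary;
            this.initializing = initializing;
            this.relocatingNodeId = relocatingNodeId;
        }
    }

    static int primariesInInitialRecovery(List<Shard> shardsOnNode) {
        int count = 0;
        for (Shard shard : shardsOnNode) {
            if (shard.primary && shard.initializing && shard.relocatingNodeId == null) {
                count++;
            }
        }
        return count;
    }

    public static void main(String[] args) {
        List<Shard> node = Arrays.asList(
                new Shard(true, true, null),     // local primary recovery -> counts
                new Shard(true, true, "node1"),  // primary relocating in  -> does not count
                new Shard(false, true, null));   // replica                -> does not count
        System.out.println(primariesInInitialRecovery(node)); // 1
    }
}
```

With that distinction, relocations are throttled only by node_concurrent_recoveries, so local primary recoveries are no longer starved by incoming relocations after a node restart.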
{ "body": "Seems that with 0.90.7/master loading files via the `config/templates`directory does not work.\n\nThe problem is, that the parsed file jumps over the first while parsing, which results in the template not being set, if it is the first.\n\nGoing to send a PR in a second.\n", "comments": [ { "body": "This still doesn't seem to work for me.\n\nI've used example given in http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-templates.html#config but after restart, `curl -XGET http://localhost:9200/_template` is empty and doesn't seem to be applied.\n\nElasticsearch 0.9.10.\n", "created_at": "2014-01-17T13:51:18Z" } ], "number": 4235, "title": "Loading templates via templates/ directory is not working" }
{ "body": "When parsing the json file, the first field is ignored as\nparser.nextToken() seems to be called too often.\n\nCloses #4235\n", "number": 4236, "review_comments": [], "title": "Fixed file-based template loading via config/templates" }
{ "commits": [ { "message": "Fixed file-based template loading via config/templates\n\nWhen parsing the json file, the first field is ignored as\nparser.nextToken() seems to be called too often.\n\nCloses #4235" } ], "files": [ { "diff": "@@ -482,7 +482,7 @@ private List<IndexTemplateMetaData> findTemplates(Request request, ClusterState\n try {\n byte[] templatesData = Streams.copyToByteArray(templatesFile);\n parser = XContentHelper.createParser(templatesData, 0, templatesData.length);\n- IndexTemplateMetaData template = IndexTemplateMetaData.Builder.fromXContentStandalone(parser);\n+ IndexTemplateMetaData template = IndexTemplateMetaData.Builder.fromXContent(parser);\n if (Regex.simpleMatch(template.template(), request.index)) {\n templates.add(template);\n }", "filename": "src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java", "status": "modified" }, { "diff": "@@ -0,0 +1,78 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.indices.template;\n+\n+import com.google.common.base.Charsets;\n+import com.google.common.io.Files;\n+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;\n+import org.elasticsearch.common.io.Streams;\n+import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.test.ElasticsearchIntegrationTest;\n+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;\n+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;\n+import org.junit.Rule;\n+import org.junit.Test;\n+import org.junit.rules.TemporaryFolder;\n+\n+import java.io.File;\n+\n+import static org.hamcrest.Matchers.is;\n+\n+/**\n+ *\n+ */\n+@ClusterScope(scope= Scope.SUITE, numNodes=1)\n+public class IndexTemplateFileLoadingTests extends ElasticsearchIntegrationTest {\n+\n+ @Rule\n+ public TemporaryFolder temporaryFolder = new TemporaryFolder();\n+\n+ @Override\n+ protected Settings nodeSettings(int nodeOrdinal) {\n+ ImmutableSettings.Builder settingsBuilder = ImmutableSettings.settingsBuilder();\n+ settingsBuilder.put(super.nodeSettings(nodeOrdinal));\n+\n+ try {\n+ File directory = temporaryFolder.newFolder();\n+ settingsBuilder.put(\"path.conf\", directory.getPath());\n+\n+ File templatesDir = new File(directory + File.separator + \"templates\");\n+ templatesDir.mkdir();\n+\n+ File dst = new File(templatesDir, \"template.json\");\n+ String template = Streams.copyToStringFromClasspath(\"/org/elasticsearch/indices/template/template.json\");\n+ Files.write(template, dst, Charsets.UTF_8);\n+ } catch (Exception e) {\n+ throw new RuntimeException(e);\n+ }\n+\n+ return settingsBuilder.build();\n+ }\n+\n+ @Test\n+ public void 
testThatLoadingTemplateFromFileWorks() throws Exception {\n+ createIndex(\"foobar\");\n+ ensureYellow(); // ensuring yellow so the test fails faster if the template cannot be loaded\n+\n+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().setFilterIndices(\"foobar\").get();\n+ assertThat(stateResponse.getState().getMetaData().indices().get(\"foobar\").getNumberOfShards(), is(10));\n+ assertThat(stateResponse.getState().getMetaData().indices().get(\"foobar\").getNumberOfReplicas(), is(0));\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/indices/template/IndexTemplateFileLoadingTests.java", "status": "added" }, { "diff": "@@ -0,0 +1,7 @@\n+{\n+ \"template\" : \"foo*\",\n+ \"settings\" : {\n+ \"index.number_of_shards\": 10,\n+ \"index.number_of_replicas\": 0\n+ }\n+}\n\\ No newline at end of file", "filename": "src/test/java/org/elasticsearch/indices/template/template.json", "status": "added" } ] }
{ "body": "Ran into this by accident, but basically if you add a warmup query with empty body, ES will accept it(even though I believe it shouldn't). \nAfter accepting this empty bodied query, it's not possible anymore to access warmup queries by regular exp. that would also match this query. Follows how to reproduce(on 0.90.8-SNAPSHOT)\n\n```\ncurl -XPOST http://localhost:9200/foo\ncurl -XPUT http://localhost:9200/foo/bar/1 -d '{\"id\":1,\"content\":\"one\"}'\ncurl -XPUT http://localhost:9200/foo/bar/2 -d '{\"id\":2,\"content\":\"two\"}'\ncurl -XPUT http://localhost:9200/foo/_warmer/warmer_1 -d '{\"query\": {\"match_all\":{}}}'\ncurl -XGET http://localhost:9200/foo/_warmer/w*\ncurl -XPUT http://localhost:9200/foo/_warmer/warmer_2\ncurl -XGET http://localhost:9200/foo/_warmer/w*\n```\n\nwhich yields:\n{\"error\":\"NullPointerException[null]\",\"status\":500}\n", "comments": [ { "body": "@spinscale i have some free time now. should i look into it or are you already doing it?\n", "created_at": "2013-11-18T13:53:07Z" }, { "body": "@lmenezes on it already... seems we should stop making the search request optional, when doing the validation.\n\nthanks for your help, as usual :-)\n", "created_at": "2013-11-18T13:55:19Z" }, { "body": "@spinscale np then. yep, i guess just rejecting empty requests would already solve the problem :)\nthanks for the quick response!\n", "created_at": "2013-11-18T13:57:34Z" }, { "body": ":+1: \n", "created_at": "2013-11-19T14:43:52Z" } ], "number": 4196, "title": "Warmers API breaking when empty Warmup query is created" }
{ "body": "The search request inside of a put warmer request was nullable, but actually we have to have that request in the transport action.\nValidation and appropriate test added.\n\nCloses #4196\n", "number": 4197, "review_comments": [], "title": "Making SearchRequest in PutWarmerRequest mandatory & validated" }
{ "commits": [ { "message": "Making SearchRequest in PutWarmerRequest mandatory & validated\n\nThe search request inside of a put warmer request was nullable, but actually we have to have that request in the transport action.\nValidation and appropriate test added.\n\nCloses #4196" } ], "files": [ { "diff": "@@ -24,7 +24,6 @@\n import org.elasticsearch.action.search.SearchRequest;\n import org.elasticsearch.action.search.SearchRequestBuilder;\n import org.elasticsearch.action.support.master.AcknowledgedRequest;\n-import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.io.stream.StreamInput;\n import org.elasticsearch.common.io.stream.StreamOutput;\n \n@@ -81,14 +80,18 @@ public PutWarmerRequest searchRequest(SearchRequestBuilder searchRequest) {\n return this;\n }\n \n- @Nullable\n SearchRequest searchRequest() {\n return this.searchRequest;\n }\n \n @Override\n public ActionRequestValidationException validate() {\n- ActionRequestValidationException validationException = searchRequest.validate();\n+ ActionRequestValidationException validationException = null;\n+ if (searchRequest == null) {\n+ validationException = addValidationError(\"search request is missing\", validationException);\n+ } else {\n+ validationException = searchRequest.validate();\n+ }\n if (name == null) {\n validationException = addValidationError(\"name is missing\", validationException);\n }", "filename": "src/main/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequest.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n package org.elasticsearch.action.admin.indices.warmer.put;\n \n import org.elasticsearch.Version;\n+import org.elasticsearch.action.ActionRequestValidationException;\n import org.elasticsearch.common.io.stream.InputStreamStreamInput;\n import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;\n import org.elasticsearch.common.unit.TimeValue;\n@@ -28,7 +29,10 @@\n import java.io.ByteArrayInputStream;\n import java.io.ByteArrayOutputStream;\n \n+import static org.hamcrest.CoreMatchers.containsString;\n import static org.hamcrest.CoreMatchers.equalTo;\n+import static org.hamcrest.Matchers.hasSize;\n+import static org.hamcrest.Matchers.is;\n \n public class PutWarmerRequestTests extends ElasticsearchTestCase {\n \n@@ -73,4 +77,12 @@ public void testPutWarmerTimeoutBwComp_Post0906Format() throws Exception {\n //timeout is default as we don't read it from the received buffer\n assertThat(inRequest.timeout().millis(), equalTo(outRequest.timeout().millis()));\n }\n+\n+ @Test // issue 4196\n+ public void testThatValidationWithoutSpecifyingSearchRequestFails() {\n+ PutWarmerRequest putWarmerRequest = new PutWarmerRequest(\"foo\");\n+ ActionRequestValidationException validationException = putWarmerRequest.validate();\n+ assertThat(validationException.validationErrors(), hasSize(1));\n+ assertThat(validationException.getMessage(), containsString(\"search request is missing\"));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java", "status": "modified" } ] }
{ "body": "I'm experiencing a deadlock in BulkProcessor during heavy bulk indexing to a single node cluster.\n\nOccasionally, during heavy indexing, the node becomes unresponsive causing the client to throw NoNodeAvailableException, triggering the issue.\n\nhttps://github.com/elasticsearch/elasticsearch/blob/master/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java#L279-305\n\nA semaphore is acquired before the call to client.bulk(request, listener) and only released in the callback. If an Exception, such as NoNodeAvailableException, is thrown from client.bulk(request, listener), the semaphore is never released.\n\nWhen the number of Exceptions thrown by client.bulk(request, listener) is greater than the number of concurrent requests supported by BulkProcessor a deadlock occurs and no more documents can be indexed.\n\nBulkProcessor should be able to handle this by wrapping client.bulk(request, listener) inside a try/catch block and handling the Exception, calling the listener and releasing the semaphore.\n", "comments": [ { "body": "good call! we are looking into this!\n", "created_at": "2013-11-12T13:08:38Z" } ], "number": 4153, "title": "Deadlock in BulkProcessor" }
{ "body": "Closes #4153\n", "number": 4155, "review_comments": [], "title": "Release semaphore if client call throws and exception" }
{ "commits": [ { "message": "Release semaphore if client call throws and exception\n\nCloses #4153" } ], "files": [ { "diff": "@@ -276,32 +276,39 @@ private void execute() {\n listener.afterBulk(executionId, bulkRequest, e);\n }\n } else {\n+ boolean success = false;\n try {\n semaphore.acquire();\n+ listener.beforeBulk(executionId, bulkRequest);\n+ client.bulk(bulkRequest, new ActionListener<BulkResponse>() {\n+ @Override\n+ public void onResponse(BulkResponse response) {\n+ try {\n+ listener.afterBulk(executionId, bulkRequest, response);\n+ } finally {\n+ semaphore.release();\n+ }\n+ }\n+\n+ @Override\n+ public void onFailure(Throwable e) {\n+ try {\n+ listener.afterBulk(executionId, bulkRequest, e);\n+ } finally {\n+ semaphore.release();\n+ }\n+ }\n+ });\n+ success = true;\n } catch (InterruptedException e) {\n+ Thread.interrupted();\n listener.afterBulk(executionId, bulkRequest, e);\n- return;\n+ } finally {\n+ if (!success) { // if we fail on client.bulk() release the semaphore\n+ semaphore.release();\n+ }\n }\n- listener.beforeBulk(executionId, bulkRequest);\n- client.bulk(bulkRequest, new ActionListener<BulkResponse>() {\n- @Override\n- public void onResponse(BulkResponse response) {\n- try {\n- listener.afterBulk(executionId, bulkRequest, response);\n- } finally {\n- semaphore.release();\n- }\n- }\n \n- @Override\n- public void onFailure(Throwable e) {\n- try {\n- listener.afterBulk(executionId, bulkRequest, e);\n- } finally {\n- semaphore.release();\n- }\n- }\n- });\n }\n }\n ", "filename": "src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java", "status": "modified" } ] }
{ "body": "river instance is not created after successfully issuing PUT _meta request \n\nthis problem happens when creating river on completely fresh cluster \nand only for first river (following updates to _meta document for this river still dont start it)\ncreating second river works fine (but first one is still down)\n\nim using latest elasticsearch sources from master branch\nand running it from eclipse (one node cluster, no replication)\nto replicate delete all cluster data files, then start single node cluster and create river as first request to the cluster\n\noverall it looks like racing condition, after debugging for while i see that when checking if river cluster state changed (RiverClusterStateUpdateTask), event.state().metaData().index(riverIndexName) is not returning newly created river (probably slowed down by initial index creation)\n\ndebug logs:\n\n[2013-10-07 15:35:38,937][DEBUG][cluster.service ] [Peepers] processing [create-index [_river], cause [auto(index api)]]: execute\n[2013-10-07 15:35:38,950][DEBUG][indices ] [Peepers] creating Index [_river], shards [1]/[0]\n[2013-10-07 15:35:43,713][DEBUG][index.mapper ] [Peepers] [_river] using dynamic[true], default mapping: default_mapping_location[null], loaded_from[file:/mnt/data/projects/git/elasticsearch/elasticsearch/target/classes/org/elasticsearch/index/mapper/default-mapping.json], default percolator mapping: location[null], loaded_from[null]\n[2013-10-07 15:35:43,722][DEBUG][index.cache.query.parser.resident] [Peepers] [_river] using [resident] query cache with max_size [100], expire [null]\n[2013-10-07 15:35:43,965][DEBUG][index.store.fs ] [Peepers] [_river] using index.store.throttle.type [node], with index.store.throttle.max_bytes_per_sec [0b]\n[2013-10-07 15:35:45,458][INFO ][cluster.metadata ] [Peepers] [_river] creating index, cause [auto(index api)], shards [1]/[0], mappings []\n[2013-10-07 15:35:45,560][DEBUG][index.cache.filter.weighted] [Peepers] [_river] full cache clear, reason [close]\n[2013-10-07 15:35:45,570][DEBUG][cluster.service ] [Peepers] cluster state updated, version [3], source [create-index [_river], cause [auto(index api)]]\n[2013-10-07 15:35:45,572][DEBUG][cluster.service ] [Peepers] publishing cluster state version 3\n[2013-10-07 15:35:45,574][DEBUG][cluster.service ] [Peepers] set local cluster state to version 3\n[2013-10-07 15:35:45,579][DEBUG][river.cluster ] [Peepers] processing [reroute_rivers_node_changed]: execute\n[2013-10-07 15:35:45,584][DEBUG][indices.cluster ] [Peepers] [_river] creating index\n[2013-10-07 15:35:45,587][DEBUG][indices ] [Peepers] creating Index [_river], shards [1]/[0]\n[2013-10-07 15:35:45,615][DEBUG][river.cluster ] [Peepers] processing [reroute_rivers_node_changed]: no change in cluster_state\n[2013-10-07 15:35:47,464][DEBUG][index.mapper ] [Peepers] [_river] using dynamic[true], default mapping: default_mapping_location[null], loaded_from[file:/mnt/data/projects/git/elasticsearch/elasticsearch/target/classes/org/elasticsearch/index/mapper/default-mapping.json], default percolator mapping: location[null], loaded_from[null]\n[2013-10-07 15:35:47,472][DEBUG][index.cache.query.parser.resident] [Peepers] [_river] using [resident] query cache with max_size [100], expire [null]\n[2013-10-07 15:35:47,519][DEBUG][index.store.fs ] [Peepers] [_river] using index.store.throttle.type [node], with index.store.throttle.max_bytes_per_sec [0b]\n[2013-10-07 15:35:47,593][DEBUG][indices.cluster ] [Peepers] [_river][0] creating shard\n[2013-10-07 
15:35:47,595][DEBUG][index.service ] [Peepers] [_river] creating shard_id [0]\n[2013-10-07 15:35:49,258][DEBUG][index.deletionpolicy ] [Peepers] [_river][0] Using [keep_only_last] deletion policy\n[2013-10-07 15:35:49,287][DEBUG][index.merge.policy ] [Peepers] [_river][0] using [tiered] merge policy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0], async_merge[true]\n[2013-10-07 15:35:49,293][DEBUG][index.merge.scheduler ] [Peepers] [_river][0] using [concurrent] merge scheduler with max_thread_count[1]\n[2013-10-07 15:35:49,377][DEBUG][index.shard.service ] [Peepers] [_river][0] state: [CREATED]\n[2013-10-07 15:35:49,389][DEBUG][index.translog ] [Peepers] [_river][0] interval [5s], flush_threshold_ops [5000], flush_threshold_size [200mb], flush_threshold_period [30m]\n[2013-10-07 15:35:49,467][DEBUG][index.shard.service ] [Peepers] [_river][0] state: [CREATED]->[RECOVERING], reason [from gateway]\n[2013-10-07 15:35:49,474][DEBUG][index.gateway ] [Peepers] [_river][0] starting recovery from local ...\n[2013-10-07 15:35:49,620][DEBUG][index.engine.robin ] [Peepers] [_river][0] starting engine\n[2013-10-07 15:35:49,708][DEBUG][cluster.service ] [Peepers] processing [create-index [_river], cause [auto(index api)]]: done applying updated cluster_state (version: 3)\n[2013-10-07 15:35:50,875][DEBUG][index.shard.service ] [Peepers] [_river][0] scheduling refresher every 1s\n[2013-10-07 15:35:50,882][DEBUG][index.shard.service ] [Peepers] [_river][0] scheduling optimizer / merger every 1s\n[2013-10-07 15:35:50,884][DEBUG][index.shard.service ] [Peepers] [_river][0] state: [RECOVERING]->[POST_RECOVERY], reason [post recovery from gateway, no translog]\n[2013-10-07 15:35:50,888][DEBUG][index.gateway ] [Peepers] [_river][0] recovery completed from [local], took [1.4s]\n[2013-10-07 15:35:50,890][DEBUG][cluster.action.shard ] [Peepers] sending shard started for [_river][0], node[FDoGx4I-T8SLGc1IYsBkzw], [P], s[INITIALIZING], indexUUID [hhVydPb3QvuD110I9V4XyA], reason [after recovery from gateway]\n[2013-10-07 15:35:50,892][DEBUG][cluster.action.shard ] [Peepers] received shard started for [_river][0], node[FDoGx4I-T8SLGc1IYsBkzw], [P], s[INITIALIZING], indexUUID [hhVydPb3QvuD110I9V4XyA], reason [after recovery from gateway]\n[2013-10-07 15:35:50,900][DEBUG][cluster.service ] [Peepers] processing [shard-started ([_river][0], node[FDoGx4I-T8SLGc1IYsBkzw], [P], s[INITIALIZING]), reason [after recovery from gateway]]: execute\n[2013-10-07 15:35:50,905][DEBUG][cluster.action.shard ] [Peepers] [_river][0] will apply shard started [_river][0], node[FDoGx4I-T8SLGc1IYsBkzw], [P], s[INITIALIZING], indexUUID [hhVydPb3QvuD110I9V4XyA], reason [after recovery from gateway]\n[2013-10-07 15:35:50,915][DEBUG][cluster.service ] [Peepers] cluster state updated, version [4], source [shard-started ([_river][0], node[FDoGx4I-T8SLGc1IYsBkzw], [P], s[INITIALIZING]), reason [after recovery from gateway]]\n[2013-10-07 15:35:50,917][DEBUG][cluster.service ] [Peepers] publishing cluster state version 4\n[2013-10-07 15:35:50,920][DEBUG][cluster.service ] [Peepers] set local cluster state to version 4\n[2013-10-07 15:35:50,924][DEBUG][river.cluster ] [Peepers] processing [reroute_rivers_node_changed]: execute\n[2013-10-07 15:35:50,936][DEBUG][river.cluster ] [Peepers] processing [reroute_rivers_node_changed]: no change in cluster_state\n[2013-10-07 15:35:50,937][DEBUG][index.shard.service ] 
[Peepers] [_river][0] state: [POST_RECOVERY]->[STARTED], reason [global state moved to started]\n[2013-10-07 15:35:51,155][DEBUG][cluster.service ] [Peepers] processing [shard-started ([_river][0], node[FDoGx4I-T8SLGc1IYsBkzw], [P], s[INITIALIZING]), reason [after recovery from gateway]]: done applying updated cluster_state (version: 4)\n[2013-10-07 15:35:51,762][DEBUG][cluster.service ] [Peepers] processing [update-mapping [_river][test]]: execute\n[2013-10-07 15:35:51,962][DEBUG][cluster.metadata ] [Peepers] [_river] update_mapping [test](dynamic) with source [{\"test\":{\"index_analyzer\":\"default_index\",\"search_analyzer\":\"default_search\",\"properties\":{\"type\":{\"type\":\"string\"}}}}]\n[2013-10-07 15:35:52,022][DEBUG][cluster.service ] [Peepers] cluster state updated, version [5], source [update-mapping [_river][test]]\n[2013-10-07 15:35:52,032][DEBUG][cluster.service ] [Peepers] publishing cluster state version 5\n[2013-10-07 15:35:52,042][DEBUG][cluster.service ] [Peepers] set local cluster state to version 5\n[2013-10-07 15:35:52,055][DEBUG][river.cluster ] [Peepers] processing [reroute_rivers_node_changed]: execute\n[2013-10-07 15:35:52,233][DEBUG][river.cluster ] [Peepers] processing [reroute_rivers_node_changed]: no change in cluster_state\n[2013-10-07 15:35:52,382][DEBUG][cluster.service ] [Peepers] processing [update-mapping [_river][test]]: done applying updated cluster_state (version: 5)\n", "comments": [ { "body": "I pushed a fix for this, as I said in #4089 . Could you confirm it solves the issue?\n", "created_at": "2013-11-10T20:05:28Z" } ], "number": 3840, "title": "river instance not created after successful creation of _meta document" }
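The logs above show the race: the dynamic mapping update for the river type reaches the master and triggers the rivers router before the `_meta` document itself has been indexed, so the initial get finds nothing and the river is never started. The PR below retries that lookup a bounded number of times; stripped of the Elasticsearch plumbing, the retry idea looks roughly like this (`fetch`, `maxRetries` and `delayMillis` are illustrative names only):

```java
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public final class BoundedRetry {

    /**
     * Retries a lookup that can transiently return null because the write that
     * produces it (here: the river _meta document) has not been applied yet.
     * Gives up after maxRetries attempts, mirroring the warning logged by the
     * real fix when no _meta document ever shows up.
     */
    public static <T> T getWithRetries(Supplier<T> fetch, int maxRetries, long delayMillis)
            throws InterruptedException {
        for (int attempt = 0; attempt < maxRetries; attempt++) {
            T result = fetch.get();
            if (result != null) {
                return result;
            }
            TimeUnit.MILLISECONDS.sleep(delayMillis);
        }
        return null;
    }
}
```

The production code does the equivalent asynchronously, rescheduling the cluster-state update task on the generic thread pool instead of sleeping, so the cluster-state thread is never blocked.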
{ "body": "With #3782 we changed the execution order of dynamic mapping updates and index operations. We now first send the mapping update to the master node, and then we index the document. This makes sense but caused issues with rivers as they are started due to the cluster changed event that is triggered on the master node right after the mapping update has been applied, but in order for the river to be started its _meta document needs to be available, which is not the case anymore as the index operation most likely hasn't happened yet. As a result in most of the cases rivers don't get started.\nWhat we want to do is retry a few times if the _meta document wasn't found, so that the river gets started anyway.\n\nCloses #4089, #3840\n", "number": 4143, "review_comments": [], "title": "Make sure rivers get started" }
{ "commits": [ { "message": "Add river creation test" }, { "message": "Schedule retry if the river type is available but the _meta document isn't\n\nWith #3782 we changed the execution order of dynamic mapping updates and index operations. We now first send the mapping update to the master node, and then we index the document. This makes sense but caused issues with rivers as they are started due to the cluster changed event that is triggered on the master node right after the mapping update has been applied, but in order for the river to be started its _meta document needs to be available, which is not the case anymore as the index operation most likely hasn't happened yet. As a result in most of the cases rivers don't get started.\nWhat we want to do is retry a few times if the _meta document wasn't found, so that the river gets started anyway.\n\nCloses #4089, #3840" } ], "files": [ { "diff": "@@ -85,6 +85,13 @@ public void remove(RiverClusterStateListener listener) {\n clusterStateListeners.remove(listener);\n }\n \n+ /**\n+ * The current state.\n+ */\n+ public ClusterState state() {\n+ return clusterService.state();\n+ }\n+\n public void submitStateUpdateTask(final String source, final RiverClusterStateUpdateTask updateTask) {\n if (!lifecycle.started()) {\n return;", "filename": "src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.elasticsearch.client.Client;\n import org.elasticsearch.cluster.ClusterChangedEvent;\n import org.elasticsearch.cluster.ClusterService;\n+import org.elasticsearch.cluster.ClusterState;\n import org.elasticsearch.cluster.ClusterStateListener;\n import org.elasticsearch.cluster.block.ClusterBlockException;\n import org.elasticsearch.cluster.metadata.IndexMetaData;\n@@ -35,6 +36,9 @@\n import org.elasticsearch.common.component.AbstractLifecycleComponent;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.common.util.concurrent.CountDown;\n+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;\n import org.elasticsearch.common.xcontent.support.XContentMapValues;\n import org.elasticsearch.index.shard.IllegalIndexShardStateException;\n import org.elasticsearch.indices.IndexMissingException;\n@@ -44,6 +48,7 @@\n import org.elasticsearch.river.cluster.RiverClusterState;\n import org.elasticsearch.river.cluster.RiverClusterStateUpdateTask;\n import org.elasticsearch.river.cluster.RiverNodeHelper;\n+import org.elasticsearch.threadpool.ThreadPool;\n \n import java.util.Iterator;\n import java.util.List;\n@@ -54,18 +59,24 @@\n */\n public class RiversRouter extends AbstractLifecycleComponent<RiversRouter> implements ClusterStateListener {\n \n+ private static final TimeValue RIVER_START_RETRY_INTERVAL = TimeValue.timeValueMillis(1000);\n+ private static final int RIVER_START_MAX_RETRIES = 5;\n+\n private final String riverIndexName;\n \n private final Client client;\n \n private final RiverClusterService riverClusterService;\n \n+ private final ThreadPool threadPool;\n+\n @Inject\n- public RiversRouter(Settings settings, Client client, ClusterService clusterService, RiverClusterService riverClusterService) {\n+ public RiversRouter(Settings settings, Client client, ClusterService clusterService, RiverClusterService riverClusterService, ThreadPool threadPool) {\n super(settings);\n this.riverIndexName = 
RiverIndexName.Conf.indexName(settings);\n this.riverClusterService = riverClusterService;\n this.client = client;\n+ this.threadPool = threadPool;\n clusterService.add(this);\n }\n \n@@ -86,113 +97,142 @@ public void clusterChanged(final ClusterChangedEvent event) {\n if (!event.localNodeMaster()) {\n return;\n }\n- riverClusterService.submitStateUpdateTask(\"reroute_rivers_node_changed\", new RiverClusterStateUpdateTask() {\n+ final String source = \"reroute_rivers_node_changed\";\n+ //we'll try again a few times if we don't find the river _meta document while the type is there\n+ final CountDown countDown = new CountDown(RIVER_START_MAX_RETRIES);\n+ riverClusterService.submitStateUpdateTask(source, new RiverClusterStateUpdateTask() {\n @Override\n public RiverClusterState execute(RiverClusterState currentState) {\n- if (!event.state().metaData().hasIndex(riverIndexName)) {\n- // if there are routings, publish an empty one (so it will be deleted on nodes), otherwise, return the same state\n- if (!currentState.routing().isEmpty()) {\n- return RiverClusterState.builder().state(currentState).routing(RiversRouting.builder()).build();\n- }\n- return currentState;\n- }\n+ return updateRiverClusterState(source, currentState, event.state(), countDown);\n+ }\n+ });\n+ }\n+\n+ protected RiverClusterState updateRiverClusterState(final String source, final RiverClusterState currentState,\n+ ClusterState newClusterState, final CountDown countDown) {\n+ if (!newClusterState.metaData().hasIndex(riverIndexName)) {\n+ // if there are routings, publish an empty one (so it will be deleted on nodes), otherwise, return the same state\n+ if (!currentState.routing().isEmpty()) {\n+ return RiverClusterState.builder().state(currentState).routing(RiversRouting.builder()).build();\n+ }\n+ return currentState;\n+ }\n \n- RiversRouting.Builder routingBuilder = RiversRouting.builder().routing(currentState.routing());\n- boolean dirty = false;\n-\n- IndexMetaData indexMetaData = event.state().metaData().index(riverIndexName);\n- // go over and create new river routing (with no node) for new types (rivers names)\n- for (MappingMetaData mappingMd : indexMetaData.mappings().values()) {\n- String mappingType = mappingMd.type(); // mapping type is the name of the river\n- if (!currentState.routing().hasRiverByName(mappingType)) {\n- // no river, we need to add it to the routing with no node allocation\n- try {\n- GetResponse getResponse = client.prepareGet(riverIndexName, mappingType, \"_meta\").execute().actionGet();\n- if (getResponse.isExists()) {\n- String riverType = XContentMapValues.nodeStringValue(getResponse.getSourceAsMap().get(\"type\"), null);\n- if (riverType == null) {\n- logger.warn(\"no river type provided for [{}], ignoring...\", riverIndexName);\n- } else {\n- routingBuilder.put(new RiverRouting(new RiverName(riverType, mappingType), null));\n- dirty = true;\n- }\n+ RiversRouting.Builder routingBuilder = RiversRouting.builder().routing(currentState.routing());\n+ boolean dirty = false;\n+\n+ IndexMetaData indexMetaData = newClusterState.metaData().index(riverIndexName);\n+ // go over and create new river routing (with no node) for new types (rivers names)\n+ for (MappingMetaData mappingMd : indexMetaData.mappings().values()) {\n+ String mappingType = mappingMd.type(); // mapping type is the name of the river\n+ if (!currentState.routing().hasRiverByName(mappingType)) {\n+ // no river, we need to add it to the routing with no node allocation\n+ try {\n+ GetResponse getResponse = 
client.prepareGet(riverIndexName, mappingType, \"_meta\").setPreference(\"_primary\").get();\n+ if (!getResponse.isExists()) {\n+ if (countDown.countDown()) {\n+ logger.warn(\"no river _meta document found after {} attempts\", RIVER_START_MAX_RETRIES);\n+ } else {\n+ logger.info(\"no river _meta document found, retrying in {} ms\", RIVER_START_RETRY_INTERVAL.millis());\n+ try {\n+ threadPool.schedule(RIVER_START_RETRY_INTERVAL, ThreadPool.Names.GENERIC, new Runnable() {\n+ @Override\n+ public void run() {\n+ riverClusterService.submitStateUpdateTask(source, new RiverClusterStateUpdateTask() {\n+ @Override\n+ public RiverClusterState execute(RiverClusterState currentState) {\n+ return updateRiverClusterState(source, currentState, riverClusterService.state(), countDown);\n+ }\n+ });\n+ }\n+ });\n+ } catch(EsRejectedExecutionException ex) {\n+ logger.debug(\"Couldn't schedule river start retry, node might be shutting down\", ex);\n }\n- } catch (NoShardAvailableActionException e) {\n- // ignore, we will get it next time...\n- } catch (ClusterBlockException e) {\n- // ignore, we will get it next time\n- } catch (IndexMissingException e) {\n- // ignore, we will get it next time\n- } catch (IllegalIndexShardStateException e) {\n- // ignore, we will get it next time\n- } catch (Exception e) {\n- logger.warn(\"failed to get/parse _meta for [{}]\", e, mappingType);\n }\n+ return currentState;\n }\n- }\n- // now, remove routings that were deleted\n- // also, apply nodes that were removed and rivers were running on\n- for (RiverRouting routing : currentState.routing()) {\n- if (!indexMetaData.mappings().containsKey(routing.riverName().name())) {\n- routingBuilder.remove(routing);\n- dirty = true;\n- } else if (routing.node() != null && !event.state().nodes().nodeExists(routing.node().id())) {\n- routingBuilder.remove(routing);\n- routingBuilder.put(new RiverRouting(routing.riverName(), null));\n+ String riverType = XContentMapValues.nodeStringValue(getResponse.getSourceAsMap().get(\"type\"), null);\n+ if (riverType == null) {\n+ logger.warn(\"no river type provided for [{}], ignoring...\", riverIndexName);\n+ } else {\n+ routingBuilder.put(new RiverRouting(new RiverName(riverType, mappingType), null));\n dirty = true;\n }\n+ } catch (NoShardAvailableActionException e) {\n+ // ignore, we will get it next time...\n+ } catch (ClusterBlockException e) {\n+ // ignore, we will get it next time\n+ } catch (IndexMissingException e) {\n+ // ignore, we will get it next time\n+ } catch (IllegalIndexShardStateException e) {\n+ // ignore, we will get it next time\n+ } catch (Exception e) {\n+ logger.warn(\"failed to get/parse _meta for [{}]\", e, mappingType);\n }\n+ }\n+ }\n+ // now, remove routings that were deleted\n+ // also, apply nodes that were removed and rivers were running on\n+ for (RiverRouting routing : currentState.routing()) {\n+ if (!indexMetaData.mappings().containsKey(routing.riverName().name())) {\n+ routingBuilder.remove(routing);\n+ dirty = true;\n+ } else if (routing.node() != null && !newClusterState.nodes().nodeExists(routing.node().id())) {\n+ routingBuilder.remove(routing);\n+ routingBuilder.put(new RiverRouting(routing.riverName(), null));\n+ dirty = true;\n+ }\n+ }\n \n- // build a list from nodes to rivers\n- Map<DiscoveryNode, List<RiverRouting>> nodesToRivers = Maps.newHashMap();\n+ // build a list from nodes to rivers\n+ Map<DiscoveryNode, List<RiverRouting>> nodesToRivers = Maps.newHashMap();\n \n- for (DiscoveryNode node : event.state().nodes()) {\n- if 
(RiverNodeHelper.isRiverNode(node)) {\n- nodesToRivers.put(node, Lists.<RiverRouting>newArrayList());\n- }\n- }\n+ for (DiscoveryNode node : newClusterState.nodes()) {\n+ if (RiverNodeHelper.isRiverNode(node)) {\n+ nodesToRivers.put(node, Lists.<RiverRouting>newArrayList());\n+ }\n+ }\n \n- List<RiverRouting> unassigned = Lists.newArrayList();\n- for (RiverRouting routing : routingBuilder.build()) {\n- if (routing.node() == null) {\n- unassigned.add(routing);\n- } else {\n- List<RiverRouting> l = nodesToRivers.get(routing.node());\n- if (l == null) {\n- l = Lists.newArrayList();\n- nodesToRivers.put(routing.node(), l);\n- }\n- l.add(routing);\n- }\n+ List<RiverRouting> unassigned = Lists.newArrayList();\n+ for (RiverRouting routing : routingBuilder.build()) {\n+ if (routing.node() == null) {\n+ unassigned.add(routing);\n+ } else {\n+ List<RiverRouting> l = nodesToRivers.get(routing.node());\n+ if (l == null) {\n+ l = Lists.newArrayList();\n+ nodesToRivers.put(routing.node(), l);\n }\n- for (Iterator<RiverRouting> it = unassigned.iterator(); it.hasNext(); ) {\n- RiverRouting routing = it.next();\n- DiscoveryNode smallest = null;\n- int smallestSize = Integer.MAX_VALUE;\n- for (Map.Entry<DiscoveryNode, List<RiverRouting>> entry : nodesToRivers.entrySet()) {\n- if (RiverNodeHelper.isRiverNode(entry.getKey(), routing.riverName())) {\n- if (entry.getValue().size() < smallestSize) {\n- smallestSize = entry.getValue().size();\n- smallest = entry.getKey();\n- }\n- }\n- }\n- if (smallest != null) {\n- dirty = true;\n- it.remove();\n- routing.node(smallest);\n- nodesToRivers.get(smallest).add(routing);\n+ l.add(routing);\n+ }\n+ }\n+ for (Iterator<RiverRouting> it = unassigned.iterator(); it.hasNext(); ) {\n+ RiverRouting routing = it.next();\n+ DiscoveryNode smallest = null;\n+ int smallestSize = Integer.MAX_VALUE;\n+ for (Map.Entry<DiscoveryNode, List<RiverRouting>> entry : nodesToRivers.entrySet()) {\n+ if (RiverNodeHelper.isRiverNode(entry.getKey(), routing.riverName())) {\n+ if (entry.getValue().size() < smallestSize) {\n+ smallestSize = entry.getValue().size();\n+ smallest = entry.getKey();\n }\n }\n+ }\n+ if (smallest != null) {\n+ dirty = true;\n+ it.remove();\n+ routing.node(smallest);\n+ nodesToRivers.get(smallest).add(routing);\n+ }\n+ }\n \n \n- // add relocation logic...\n+ // add relocation logic...\n \n- if (dirty) {\n- return RiverClusterState.builder().state(currentState).routing(routingBuilder).build();\n- }\n- return currentState;\n- }\n- });\n+ if (dirty) {\n+ return RiverClusterState.builder().state(currentState).routing(routingBuilder).build();\n+ }\n+ return currentState;\n }\n }", "filename": "src/main/java/org/elasticsearch/river/routing/RiversRouter.java", "status": "modified" }, { "diff": "@@ -0,0 +1,53 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.river;\n+\n+import com.google.common.base.Predicate;\n+import org.elasticsearch.action.get.GetResponse;\n+import org.elasticsearch.action.index.IndexResponse;\n+import org.elasticsearch.test.ElasticsearchIntegrationTest;\n+import org.junit.Test;\n+\n+import java.util.concurrent.TimeUnit;\n+\n+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;\n+import static org.hamcrest.Matchers.equalTo;\n+\n+@ClusterScope(scope = Scope.TEST)\n+public class RiverTests extends ElasticsearchIntegrationTest {\n+\n+ @Test\n+ public void testRiverStart() throws Exception {\n+ final String riverName = \"test_river\";\n+ logger.info(\"--> creating river [{}]\", riverName);\n+ IndexResponse indexResponse = client().prepareIndex(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, \"_meta\")\n+ .setSource(\"type\", TestRiverModule.class.getCanonicalName()).get();\n+ assertTrue(indexResponse.isCreated());\n+\n+ logger.info(\"--> checking that river [{}] was created\", riverName);\n+ assertThat(awaitBusy(new Predicate<Object>() {\n+ public boolean apply(Object obj) {\n+ GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, \"_status\").get();\n+ return response.isExists();\n+ }\n+ }, 5, TimeUnit.SECONDS), equalTo(true));\n+\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/river/RiverTests.java", "status": "added" }, { "diff": "@@ -0,0 +1,53 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.river;\n+\n+import org.elasticsearch.client.Client;\n+import org.elasticsearch.common.inject.Inject;\n+\n+/**\n+ *\n+ */\n+public class TestRiver extends AbstractRiverComponent implements River {\n+\n+ private final String riverIndexName;\n+ private final Client client;\n+\n+ @Inject\n+ public TestRiver(RiverName riverName, @RiverIndexName String riverIndexName, RiverSettings settings, Client client) {\n+ super(riverName, settings);\n+ logger.info(\"create\");\n+ this.client = client;\n+ this.riverIndexName = riverIndexName;\n+ }\n+\n+ @Override\n+ public void start() {\n+ logger.info(\"start\");\n+ if (!client.prepareIndex(riverIndexName, riverName.getName(), \"_status\").setSource(\"created\", true).get().isCreated()) {\n+ logger.warn(\"_status record wasn't created\");\n+ }\n+ }\n+\n+ @Override\n+ public void close() {\n+ logger.info(\"close\");\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/river/TestRiver.java", "status": "added" }, { "diff": "@@ -0,0 +1,29 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. 
See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.river;\n+\n+import org.elasticsearch.common.inject.AbstractModule;\n+\n+public class TestRiverModule extends AbstractModule {\n+ @Override\n+ protected void configure() {\n+ bind(River.class).to(TestRiver.class).asEagerSingleton();\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/river/TestRiverModule.java", "status": "added" } ] }
{ "body": "If the number of results exceed a certain limit, has_child query throws a NullPointerException. \n\n```\ncurl -x '' -s -XPOST \"http://localhost:9200/myindex/vendor/_search?from=0&size=20&pretty=true&routing=0\" -d '{\n \"query\" : {\n \"has_child\" : {\n \"query\" : {\n \"query_string\" : { \"query\" : \"signed_date:[now-120d TO now-90d]\" }\n },\n \"type\" : \"transaction\"\n }\n }\n}\n'\n\n[2013-11-08 15:01:41,825][DEBUG][action.search.type ] [Phage] [myindex_20131108][1], node[fuBzjMWDQgmr_qm0cVLIog], [P], s[STARTED]: Failed to execute [org.elasticsearch.action.search.SearchRequest@23b13dc6]\norg.elasticsearch.transport.RemoteTransportException: [Ch'od][inet[/192.168.175.128:9301]][search/phase/query+fetch]\nCaused by: org.elasticsearch.search.query.QueryPhaseExecutionException: [myindex_20131108][1]: query[filtered(child_filter[transaction/vendor](filtered(signed_date:[1373572895448 TO 1376164895448])->cache(_type:transaction)))->cache(_type:vendor)],from[0],size[20]: Query Failed [Failed to execute main query]\n at org.elasticsearch.search.query.QueryPhase.execute(QueryPhase.java:123)\n at org.elasticsearch.search.SearchService.executeFetchPhase(SearchService.java:306)\n at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryFetchTransportHandler.messageReceived(SearchServiceTransportAction.java:686)\n at org.elasticsearch.search.action.SearchServiceTransportAction$SearchQueryFetchTransportHandler.messageReceived(SearchServiceTransportAction.java:1)\n at org.elasticsearch.transport.netty.MessageChannelHandler$RequestHandler.run(MessageChannelHandler.java:270)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:744)\nCaused by: java.lang.NullPointerException\n at org.elasticsearch.common.lucene.search.ApplyAcceptedDocsFilter.getDocIdSet(ApplyAcceptedDocsFilter.java:45)\n at org.elasticsearch.index.search.child.ChildrenConstantScoreQuery$ParentWeight.scorer(ChildrenConstantScoreQuery.java:178)\n at org.apache.lucene.search.FilteredQuery$RandomAccessFilterStrategy.filteredScorer(FilteredQuery.java:533)\n at org.apache.lucene.search.FilteredQuery$1.scorer(FilteredQuery.java:133)\n at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:624)\n at org.elasticsearch.search.internal.ContextIndexSearcher.search(ContextIndexSearcher.java:167)\n at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:491)\n at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:448)\n at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:281)\n at org.apache.lucene.search.IndexSearcher.search(IndexSearcher.java:269)\n at org.elasticsearch.search.query.QueryPhase.execute(QueryPhase.java:119)\n ... 7 more\n[2013-11-08 15:01:41,827][DEBUG][action.search.type ] [Phage] All shards failed for phase: [query_fetch]\n```\n\nI debugged the code and found the issue happens due to class ChildrenConstantScoreQuery.java, line #115 is passing null shortCircuitFilter. 
This happens when remaining value > 8192 so it's not initialized \n\n```\n Filter shortCircuitFilter = null;\n if (remaining == 1) {\n BytesRef id = collectedUids.v().iterator().next().value.toBytesRef();\n shortCircuitFilter = new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));\n } else if (remaining <= shortCircuitParentDocSet) {\n shortCircuitFilter = new ParentIdsFilter(parentType, collectedUids.v().keys, collectedUids.v().allocated);\n }\n\n ParentWeight parentWeight = new ParentWeight(parentFilter, shortCircuitFilter, searchContext, collectedUids);\n```\n\nThis is bug introduced after 0.90.1 as the query was running fine in past. Thanks!\n", "comments": [ { "body": "@martijnvg can you look into this? \n", "created_at": "2013-11-08T22:44:14Z" }, { "body": "Thanks for reporting this issue @ajhalani! We will fix this soon.\n", "created_at": "2013-11-09T21:21:34Z" }, { "body": "You got it, thanks for getting to it quickly! \n", "created_at": "2013-11-10T05:03:51Z" } ], "number": 4135, "title": "NPE for has_child query if number of results exceed certain limit" }
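The fix is a one-line null guard: only wrap shortCircuitFilter in ApplyAcceptedDocsFilter when it was actually built, i.e. when the number of collected parents stayed at or below the short-circuit cutoff (the 8192 mentioned above). The general shape of that guard, as a small JDK-only sketch with placeholder names:

```java
import java.util.function.UnaryOperator;

public final class OptionalDecoration {

    // A decorator applied to an optional value has to tolerate its absence:
    // shortCircuitFilter only exists when remaining <= short_circuit_cutoff,
    // so for larger parent result sets it is legitimately null.
    static <T> T wrapIfPresent(T value, UnaryOperator<T> decorator) {
        return value == null ? null : decorator.apply(value);
    }

    public static void main(String[] args) {
        UnaryOperator<String> acceptedDocs = f -> "apply-accepted-docs(" + f + ")";
        System.out.println(wrapIfPresent("parent-ids-filter", acceptedDocs)); // wrapped
        System.out.println(wrapIfPresent(null, acceptedDocs));                // stays null
    }
}
```

The tests added by the PR also randomize setShortCircuitCutoff on the has_child builders, so the code path where shortCircuitFilter stays null gets exercised as well.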
{ "body": "Fixed NPE if matched parent docs is higher than short_circuit_cutoff.\nCloses #4135\n", "number": 4139, "review_comments": [], "title": "Fix NPE in has_child filter and query." }
{ "commits": [ { "message": "Fixed NPE if matched parent docs is higher than short_circuit_cutoff.\nCloses #4135" } ], "files": [ { "diff": "@@ -132,7 +132,7 @@ public ParentWeight(Filter parentFilter, Filter shortCircuitFilter, SearchContex\n if (applyAcceptedDocs) {\n // In case filters are cached, we need to apply deletes, since filters from filter cache didn't apply deletes\n this.parentFilter = new ApplyAcceptedDocsFilter(parentFilter);\n- this.shortCircuitFilter = new ApplyAcceptedDocsFilter(shortCircuitFilter);\n+ this.shortCircuitFilter = shortCircuitFilter != null ? new ApplyAcceptedDocsFilter(shortCircuitFilter) : null;\n } else {\n this.parentFilter = parentFilter;\n this.shortCircuitFilter = shortCircuitFilter;", "filename": "src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java", "status": "modified" }, { "diff": "@@ -34,8 +34,7 @@\n import org.elasticsearch.common.lucene.search.function.CombineFunction;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.index.mapper.MergeMappingException;\n-import org.elasticsearch.index.query.QueryBuilder;\n-import org.elasticsearch.index.query.QueryBuilders;\n+import org.elasticsearch.index.query.*;\n import org.elasticsearch.search.facet.terms.TermsFacet;\n import org.elasticsearch.search.sort.SortBuilders;\n import org.elasticsearch.search.sort.SortOrder;\n@@ -65,7 +64,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {\n \n @Test\n public void multiLevelChild() throws Exception {\n-\n client().admin().indices().prepareCreate(\"test\")\n .setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1).put(\"index.number_of_replicas\", 0))\n .execute().actionGet();\n@@ -1957,4 +1955,22 @@ public void run() {\n }\n }\n \n+ private static HasChildFilterBuilder hasChildFilter(String type, QueryBuilder queryBuilder) {\n+ HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, queryBuilder);\n+ hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));\n+ return hasChildFilterBuilder;\n+ }\n+\n+ private static HasChildFilterBuilder hasChildFilter(String type, FilterBuilder filterBuilder) {\n+ HasChildFilterBuilder hasChildFilterBuilder = FilterBuilders.hasChildFilter(type, filterBuilder);\n+ hasChildFilterBuilder.setShortCircuitCutoff(randomInt(10));\n+ return hasChildFilterBuilder;\n+ }\n+\n+ private static HasChildQueryBuilder hasChildQuery(String type, QueryBuilder queryBuilder) {\n+ HasChildQueryBuilder hasChildQueryBuilder = QueryBuilders.hasChildQuery(type, queryBuilder);\n+ hasChildQueryBuilder.setShortCircuitCutoff(randomInt(10));\n+ return hasChildQueryBuilder;\n+ }\n+\n }", "filename": "src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java", "status": "modified" } ] }
{ "body": "Although it might not make much sense to percolate a document containing a completion field, that's what you end up doing if you percolate while indexing, and your mapping contains a completion field. When adding the completion field to the memory index a NullPointerException is thrown. \n\nThis happens with the 0.90 branch. Happens also with master, at least percolating an existing document.\n\nHere is the recreation:\n\n```\ncurl -XPUT localhost:9200/hotels -d '\n{\n \"mappings\": {\n \"hotel\" : {\n \"properties\" : {\n \"name\" : { \"type\" : \"string\" },\n \"city\" : { \"type\" : \"string\" },\n \"name_suggest\" : {\n \"type\" : \"completion\"\n }\n }\n }\n }\n}'\n\ncurl -XGET localhost:9200/hotels/hotel/_percolate -d '{\n \"doc\" : {\n \"name\" : \"Mercure Hotel Munich\",\n \"city\" : \"Munich\",\n \"name_suggest\" : \"Mercure Hotel Munich\"\n }\n}\n'\n```\n\nHere is the stacktrace:\n\n```\njava.lang.RuntimeException: java.lang.NullPointerException\n at org.apache.lucene.index.memory.MemoryIndex.addField(MemoryIndex.java:463)\n at org.apache.lucene.index.memory.MemoryIndex.addField(MemoryIndex.java:370)\n at org.elasticsearch.index.percolator.PercolatorExecutor.percolate(PercolatorExecutor.java:450)\n at org.elasticsearch.index.percolator.PercolatorExecutor.percolate(PercolatorExecutor.java:422)\n at org.elasticsearch.index.percolator.PercolatorService.percolate(PercolatorService.java:111)\n at org.elasticsearch.action.percolate.TransportPercolateAction.shardOperation(TransportPercolateAction.java:93)\n at org.elasticsearch.action.percolate.TransportPercolateAction.shardOperation(TransportPercolateAction.java:41)\n at org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction$AsyncSingleAction$2.run(TransportSingleCustomOperationAction.java:175)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:724)\nCaused by: java.lang.NullPointerException\n at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:274)\n at org.apache.lucene.index.memory.MemoryIndex.addField(MemoryIndex.java:437)\n ... 10 more\n```\n", "comments": [], "number": 4028, "title": "NPE when percolating a document that contains a completion field" }
{ "body": "This adds a delegate to CharTermAttributeImpl to be compatible\nwith the Percolator that needs a CharTermAttribute. Yet compared\nto CharTermAttributImpl we only fill the BytesRef with UTF-8 since\nwe already have it and only if we need to convert to UTF-32 we do it.\n\nCloses #4028\n", "number": 4129, "review_comments": [], "title": "Lazily fill CharTermAttribute if needed in CompletionTokenStream" }
{ "commits": [ { "message": "Lazily fill CharTermAttribute if needed in CompletionTokenStream\n\nThis adds a delegate to CharTermAttributeImpl to be compatible\nwith the Percolator that needs a CharTermAttribute. Yet compared\nto CharTermAttributImpl we only fill the BytesRef with UTF-8 since\nwe already have it and only if we need to convert to UTF-16 we do it.\n\nCloses #4028" } ], "files": [ { "diff": "@@ -19,12 +19,8 @@\n package org.elasticsearch.search.suggest.completion;\n \n import org.apache.lucene.analysis.TokenStream;\n-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;\n-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;\n-import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;\n-import org.apache.lucene.util.AttributeImpl;\n-import org.apache.lucene.util.BytesRef;\n-import org.apache.lucene.util.IntsRef;\n+import org.apache.lucene.analysis.tokenattributes.*;\n+import org.apache.lucene.util.*;\n import org.apache.lucene.util.fst.Util;\n \n import java.io.IOException;\n@@ -36,19 +32,22 @@\n */\n public final class CompletionTokenStream extends TokenStream {\n \n- private final PayloadAttribute payloadAttr = addAttribute(PayloadAttribute.class);;\n+ private final PayloadAttribute payloadAttr = addAttribute(PayloadAttribute.class);\n private final PositionIncrementAttribute posAttr = addAttribute(PositionIncrementAttribute.class);\n- private final ByteTermAttribute bytesAtt = addAttribute(ByteTermAttribute.class);\n+ private final ByteTermAttribute bytesAtt = addAttribute(ByteTermAttribute.class);;\n+\n \n private final TokenStream input;\n private BytesRef payload;\n private Iterator<IntsRef> finiteStrings;\n private ToFiniteStrings toFiniteStrings;\n private int posInc = -1;\n private static final int MAX_PATHS = 256;\n- private final BytesRef scratch = new BytesRef();\n+ private CharTermAttribute charTermAttribute;\n \n public CompletionTokenStream(TokenStream input, BytesRef payload, ToFiniteStrings toFiniteStrings) throws IOException {\n+ // Don't call the super(input) ctor - this is a true delegate and has a new attribute source since we consume\n+ // the input stream entirely in toFiniteStrings(input)\n this.input = input;\n this.payload = payload;\n this.toFiniteStrings = toFiniteStrings;\n@@ -74,8 +73,11 @@ public boolean incrementToken() throws IOException {\n * produced. 
Multi Fields have the same surface form and therefore sum up\n */\n posInc = 0;\n- Util.toBytesRef(finiteStrings.next(), scratch); // now we have UTF-8\n- bytesAtt.setBytesRef(scratch);\n+ Util.toBytesRef(finiteStrings.next(), bytesAtt.getBytesRef()); // now we have UTF-8\n+ if (charTermAttribute != null) {\n+ charTermAttribute.setLength(0);\n+ charTermAttribute.append(bytesAtt.toUTF16());\n+ }\n if (payload != null) {\n payloadAttr.setPayload(this.payload);\n }\n@@ -107,16 +109,23 @@ public static interface ToFiniteStrings {\n @Override\n public void reset() throws IOException {\n super.reset();\n+ if (hasAttribute(CharTermAttribute.class)) {\n+ // we only create this if we really need it to safe the UTF-8 to UTF-16 conversion\n+ charTermAttribute = getAttribute(CharTermAttribute.class);\n+ }\n finiteStrings = null;\n posInc = -1;\n }\n \n public interface ByteTermAttribute extends TermToBytesRefAttribute {\n- public void setBytesRef(BytesRef bytes);\n+ // marker interface\n+\n+ public CharSequence toUTF16();\n }\n \n public static final class ByteTermAttributeImpl extends AttributeImpl implements ByteTermAttribute, TermToBytesRefAttribute {\n- private BytesRef bytes;\n+ private final BytesRef bytes = new BytesRef();\n+ private CharsRef charsRef;\n \n @Override\n public int fillBytesRef() {\n@@ -128,19 +137,24 @@ public BytesRef getBytesRef() {\n return bytes;\n }\n \n- @Override\n- public void setBytesRef(BytesRef bytes) {\n- this.bytes = bytes;\n- }\n-\n @Override\n public void clear() {\n+ bytes.length = 0;\n }\n \n @Override\n public void copyTo(AttributeImpl target) {\n ByteTermAttributeImpl other = (ByteTermAttributeImpl) target;\n- other.bytes = bytes;\n+ other.bytes.copyBytes(bytes);\n+ }\n+\n+ @Override\n+ public CharSequence toUTF16() {\n+ if (charsRef == null) {\n+ charsRef = new CharsRef();\n+ }\n+ UnicodeUtil.UTF8toUTF16(bytes, charsRef);\n+ return charsRef;\n }\n }\n }\n\\ No newline at end of file", "filename": "src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java", "status": "modified" }, { "diff": "@@ -28,14 +28,17 @@\n import org.elasticsearch.action.admin.indices.segments.ShardSegments;\n import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.action.percolate.PercolateResponse;\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.suggest.SuggestResponse;\n+import org.elasticsearch.client.Requests;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.mapper.MapperException;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.core.CompletionFieldMapper;\n+import org.elasticsearch.percolator.PercolatorService;\n import org.elasticsearch.search.sort.FieldSortBuilder;\n import org.elasticsearch.search.suggest.completion.CompletionStats;\n import org.elasticsearch.search.suggest.completion.CompletionSuggestion;\n@@ -56,6 +59,7 @@\n import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;\n import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;\n import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n import static org.hamcrest.Matchers.*;\n \n@@ -89,6 +93,36 @@ public void testSimple() throws Exception {\n assertSuggestionsNotInOrder(\"t\", \"The Prodigy\", \"Turbonegro\", \"Turbonegro Get it on\", \"The Prodigy Firestarter\");\n }\n \n+ @Test\n+ public void testSuggestFieldWithPercolateApi() throws Exception {\n+ createIndexAndMapping();\n+ String[][] input = {{\"Foo Fighters\"}, {\"Foo Fighters\"}, {\"Foo Fighters\"}, {\"Foo Fighters\"},\n+ {\"Generator\", \"Foo Fighters Generator\"}, {\"Learn to Fly\", \"Foo Fighters Learn to Fly\"},\n+ {\"The Prodigy\"}, {\"The Prodigy\"}, {\"The Prodigy\"}, {\"Firestarter\", \"The Prodigy Firestarter\"},\n+ {\"Turbonegro\"}, {\"Turbonegro\"}, {\"Get it on\", \"Turbonegro Get it on\"}}; // work with frequencies\n+ for (int i = 0; i < input.length; i++) {\n+ client().prepareIndex(INDEX, TYPE, \"\" + i)\n+ .setSource(jsonBuilder()\n+ .startObject().startObject(FIELD)\n+ .startArray(\"input\").value(input[i]).endArray()\n+ .endObject()\n+ .endObject()\n+ )\n+ .execute().actionGet();\n+ }\n+\n+ client().prepareIndex(INDEX, PercolatorService.TYPE_NAME, \"4\")\n+ .setSource(jsonBuilder().startObject().field(\"query\", matchAllQuery()).endObject())\n+ .execute().actionGet();\n+\n+ refresh();\n+\n+ PercolateResponse response = client().preparePercolate().setIndices(INDEX).setDocumentType(TYPE)\n+ .setGetRequest(Requests.getRequest(INDEX).type(TYPE).id(\"1\"))\n+ .execute().actionGet();\n+ assertThat(response.getCount(), equalTo(1l));\n+ }\n+\n @Test\n public void testBasicPrefixSuggestion() throws Exception {\n createIndexAndMapping();", "filename": "src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java", "status": "modified" }, { "diff": "@@ -25,10 +25,7 @@\n import org.apache.lucene.analysis.synonym.SynonymFilter;\n import org.apache.lucene.analysis.synonym.SynonymMap;\n import org.apache.lucene.analysis.synonym.SynonymMap.Builder;\n-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;\n-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;\n-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;\n-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;\n+import org.apache.lucene.analysis.tokenattributes.*;\n import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;\n import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.CharsRef;\n@@ -42,6 +39,8 @@\n import java.io.StringReader;\n import java.util.Set;\n \n+import static org.hamcrest.Matchers.equalTo;\n+\n public class CompletionTokenStreamTest extends ElasticsearchTokenStreamTestCase {\n \n final XAnalyzingSuggester suggester = new XAnalyzingSuggester(new SimpleAnalyzer(TEST_VERSION_CURRENT));\n@@ -143,12 +142,36 @@ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {\n suggestTokenStream.close();\n \n }\n- \n+\n+ @Test\n+ public void testSuggestTokenFilterProperlyDelegateInputStream() throws Exception {\n+ TokenStream tokenStream = new MockTokenizer(new StringReader(\"mykeyword\"), MockTokenizer.WHITESPACE, true);\n+ BytesRef payload = new BytesRef(\"Surface keyword|friggin payload|10\");\n+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {\n+ @Override\n+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {\n+ return 
suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);\n+ }\n+ }));\n+ TermToBytesRefAttribute termAtt = suggestTokenStream.getAttribute(TermToBytesRefAttribute.class);\n+ BytesRef ref = termAtt.getBytesRef();\n+ assertNotNull(ref);\n+ suggestTokenStream.reset();\n+\n+ while (suggestTokenStream.incrementToken()) {\n+ termAtt.fillBytesRef();\n+ assertThat(ref.utf8ToString(), equalTo(\"mykeyword\"));\n+ }\n+ suggestTokenStream.end();\n+ suggestTokenStream.close();\n+ }\n+\n+\n public final static class ByteTermAttrToCharTermAttrFilter extends TokenFilter {\n- private CharTermAttribute attr = addAttribute(CharTermAttribute.class);\n private ByteTermAttribute byteAttr = addAttribute(ByteTermAttribute.class);\n private PayloadAttribute payload = addAttribute(PayloadAttribute.class);\n private TypeAttribute type = addAttribute(TypeAttribute.class);\n+ private CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class);\n protected ByteTermAttrToCharTermAttrFilter(TokenStream input) {\n super(input);\n }\n@@ -157,13 +180,12 @@ protected ByteTermAttrToCharTermAttrFilter(TokenStream input) {\n public boolean incrementToken() throws IOException {\n if (input.incrementToken()) {\n BytesRef bytesRef = byteAttr.getBytesRef();\n- attr.append(bytesRef.utf8ToString());\n // we move them over so we can assert them more easily in the tests\n- type.setType(payload.getPayload().utf8ToString()); \n+ type.setType(payload.getPayload().utf8ToString());\n return true;\n }\n return false;\n }\n- \n+\n }\n }", "filename": "src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java", "status": "modified" } ] }
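The conversion step this commit adds is easy to miss inside the diff. Below is a minimal standalone sketch of the same UTF-8 to UTF-16 step, assuming the Lucene 4.x utility classes the diff itself uses; the class name and variable names are illustrative only and are not part of the change.

```
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.UnicodeUtil;

// Hedged sketch: the completion stream above already holds the term as UTF-8 bytes,
// so the CharsRef is only filled (and only allocated) when a CharTermAttribute
// consumer, such as the percolator, actually needs UTF-16 characters.
public class Utf8ToUtf16Sketch {
    public static void main(String[] args) {
        BytesRef bytes = new BytesRef("mykeyword"); // the UTF-8 form kept by ByteTermAttributeImpl
        CharsRef chars = new CharsRef();            // allocated lazily in the real code
        UnicodeUtil.UTF8toUTF16(bytes, chars);      // reuses the CharsRef buffer on every call
        System.out.println(chars.toString());       // prints "mykeyword"
    }
}
```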
{ "body": "A used in the forum is trying to apply a nested filter to just one index, while querying more than one index, some of which don't have the nested mapping.\n\nThe query is complaining about a missing nested mapping, but on the indices to which the query should not apply.\n\nCreate two indices: `test_1` with a nested mapping, and `test_2` without a nested mapping:\n\n```\ncurl -XPUT 'http://127.0.0.1:9200/test_1/?pretty=1' -d '\n{\n \"mappings\" : {\n \"foo\" : {\n \"properties\" : {\n \"authors\" : {\n \"type\" : \"nested\",\n \"properties\" : {\n \"name\" : {\n \"type\" : \"string\"\n }\n }\n },\n \"title\" : {\n \"type\" : \"string\"\n }\n }\n }\n }\n}\n'\n\ncurl -XPUT 'http://127.0.0.1:9200/test_2/?pretty=1' -d '\n{\n \"mappings\" : {\n \"bar\" : {\n \"properties\" : {\n \"title\" : {\n \"type\" : \"string\"\n }\n }\n }\n }\n}\n'\n```\n\nIndex some data:\n\n```\ncurl -XPOST 'http://127.0.0.1:9200/test_1/foo?pretty=1' -d '\n{\n \"author\" : {\n \"name\" : \"john smith\"\n },\n \"title\" : \"test 1 doc\"\n}\n'\n\ncurl -XPOST 'http://127.0.0.1:9200/test_2/bar?pretty=1' -d '\n{\n \"title\" : \"test 2 doc\"\n}\n'\n```\n\nQuery `test_1` and `test_2` but limit the nested filter to just `test_1`:\n\n```\ncurl -XGET 'http://127.0.0.1:9200/test_1%2Ctest_2/_search?pretty=1' -d '\n{\n \"query\" : {\n \"filtered\" : {\n \"filter\" : {\n \"indices\" : {\n \"no_match_filter\" : \"none\",\n \"filter\" : {\n \"nested\" : {\n \"filter\" : {\n \"term\" : {\n \"author.name\" : \"john\"\n }\n },\n \"path\" : \"author\"\n }\n },\n \"indices\" : [\n \"test_1\"\n ]\n }\n },\n \"query\" : {\n \"match\" : {\n \"title\" : \"test\"\n }\n }\n }\n }\n}\n'\n```\n\nThrows this error:\n\n```\nSearchPhaseExecutionException[Failed to execute phase [query], total failure; shardFailures {[Yit05d94RgiUwMg9vzMOgw][test_1][1]: SearchParseException[[test_1][1]: from[-1],size[-1]: Parse Failure [Failed to parse source [{\n \"query\" : {\n \"filtered\" : {\n \"filter\" : {\n \"indices\" : {\n \"no_match_filter\" : \"none\",\n \"filter\" : {\n \"nested\" : {\n \"filter\" : {\n \"term\" : {\n \"author.name\" : \"john\"\n }\n },\n \"path\" : \"author\"\n }\n },\n \"indices\" : [\n \"test_1\"\n ]\n }\n },\n \"query\" : {\n \"match\" : {\n \"title\" : \"test\"\n }\n }\n }\n }\n}\n```\n", "comments": [ { "body": "I've seen this behaviour too. I have a horrible feeling that it's causing a whole pile of extra work on our cluster. If someone's running a search that only covers two weeks of data, we really don't want to start dozens of extra threads for the rest of the year, even if ultimately they return no data.\n", "created_at": "2013-09-11T20:37:54Z" } ], "number": 2416, "title": "Indices filter parsed for indices to which it should not apply" }
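For readers using the Java API rather than curl, the same search can be expressed with the 0.90.x query builders. This is a hedged sketch only: it assumes an already-connected `Client client`, and the index, type, and field names are taken verbatim from the report above rather than from a verified mapping.

```
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

import static org.elasticsearch.index.query.FilterBuilders.*;
import static org.elasticsearch.index.query.QueryBuilders.*;

// Hedged sketch of the reporter's query via the Java builders. Before the fix in the
// following pull request, this fails the same way, because the nested filter is still
// parsed on test_2 even though the indices filter only targets test_1.
public class IndicesFilterRepro {
    static SearchResponse search(Client client) {
        return client.prepareSearch("test_1", "test_2")
                .setQuery(filteredQuery(
                        matchQuery("title", "test"),
                        indicesFilter(
                                nestedFilter("author", termFilter("author.name", "john")),
                                "test_1")
                                .noMatchFilter("none")))
                .execute().actionGet();
    }
}
```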
{ "body": "Fixes #2416\n", "number": 4111, "review_comments": [ { "body": "This line is called multiple times as we keep adding indices. It could be called once at the end of the loop I guess.\n", "created_at": "2013-11-13T10:44:34Z" }, { "body": "The way I see it we call `concreteIndices` as soon as we found `indices` (or `index`), and at the same time we could verify if those indices match the current one and store the result, no need to call `matchesIndices` in multiple places.\n", "created_at": "2013-11-13T10:46:19Z" }, { "body": "Same comments that I left for the filter parser apply here too.\n", "created_at": "2013-11-13T10:47:28Z" }, { "body": "Here you call the health api only after the first index creation, which might lead to issues. I would use the common methods in the base class like this, as you don't seem to need a single shard, neither to create the type explicitly as far as I understood:\n\n```\ncreateIndex(\"index1\");\ncreateIndex(\"index2\");\nensureGreen();\n```\n\nMakes sense?\n", "created_at": "2013-11-13T10:49:19Z" }, { "body": "if you use here `refresh();` from the base class we also make sure we get back no failures.\n", "created_at": "2013-11-13T10:50:07Z" }, { "body": "you can replace both lines (check for failed shards and total hits) with: `assertHitCount(response, 2l);`. I would it everywhere in new tests as it makes them less verbose.\n", "created_at": "2013-11-13T10:50:53Z" }, { "body": "Also, you dont necessarily have to change this but you can now replace `.execute.actionGet();` with just `.get();`\n", "created_at": "2013-11-13T10:52:32Z" }, { "body": "Indeed, it happens because all shards fail. I guess you want to verify that a normal query would fail but since we parse it only for the right indices we don't get back the failure using indices filter/query. I would then try and catch the exception that's thrown.\n", "created_at": "2013-11-13T10:54:01Z" }, { "body": "Can we have a new `testIndicesFilter` method as well?\n", "created_at": "2013-11-13T10:54:44Z" }, { "body": "Btw, you don't necessarily need to use a jsonBuilder here, you can just do `setSource(\"text\",\"parent\")`\n", "created_at": "2013-11-13T10:56:44Z" }, { "body": "I somehow thought the error happened in another thread and could not be caught.\nInstead it is forwarded to the caller, and I can catch it in the tests.\nI tried to assert that I got a `QueryParsingException`, but with randomized testing I have 3 ways to analyze the error, and only one would allow to match the class name as a substring of the detailed message. So I just expect and catch the error.\n", "created_at": "2013-11-13T16:52:32Z" }, { "body": "Sure it does! I reduced the number of shards to speed up the tests a bit.\n", "created_at": "2013-11-13T16:53:00Z" }, { "body": "Oops, right.\n", "created_at": "2013-11-13T16:53:16Z" }, { "body": "Good catch! I stayed too close to the original code to see this opportunity.\n", "created_at": "2013-11-13T16:53:50Z" } ], "title": "Indices query/filter skip parsing altogether for irrelevant indices" }
{ "commits": [ { "message": "Indices query/filter skip parsing altogether for irrelevant indices - fix #2416" } ], "files": [ { "diff": "@@ -0,0 +1,38 @@\n+[[query-dsl-indices-filter]]\n+=== Indices Filter\n+\n+The `indices` filter can be used when executed across multiple indices,\n+allowing to have a filter that executes only when executed on an index\n+that matches a specific list of indices, and another filter that executes\n+when it is executed on an index that does not match the listed indices.\n+\n+[source,js]\n+--------------------------------------------------\n+{\n+ \"indices\" : {\n+ \"indices\" : [\"index1\", \"index2\"],\n+ \"filter\" : {\n+ \"term\" : { \"tag\" : \"wow\" }\n+ },\n+ \"no_match_filter\" : {\n+ \"term\" : { \"tag\" : \"kow\" }\n+ }\n+ }\n+}\n+--------------------------------------------------\n+\n+You can use the `index` field to provide a single index.\n+\n+`no_match_filter` can also have \"string\" value of `none` (to match no\n+documents), and `all` (to match all). Defaults to `all`.\n+\n+`filter` is mandatory. You must provide the indices.\n+It is forbidden to omit or to give `indices` or `index` multiple times,\n+or to give both.\n+\n+Please note that the fields order is important: If the indices are\n+provided before `filter` or `no_match_filter`, the filter parsing is\n+skipped altogether.\n+For instance, this feature is useful to prevent a query that runs\n+against multiple indices to fail because of a missing type.\n+See `has_child`, `has_parent`, `top_children` and `nested`.", "filename": "docs/reference/query-dsl/filters/indices-filter.asciidoc", "status": "added" }, { "diff": "@@ -21,5 +21,18 @@ when it is executed on an index that does not match the listed indices.\n }\n --------------------------------------------------\n \n+You can use the `index` field to provide a single index.\n+\n `no_match_query` can also have \"string\" value of `none` (to match no\n-documents), and `all` (to match all).\n+documents), and `all` (to match all). Defaults to `all`.\n+\n+`query` is mandatory. 
You must provide the indices.\n+It is forbidden to omit or to give `indices` or `index` multiple times,\n+or to give both.\n+\n+Please note that the fields order is important: If the indices are\n+provided before `query` or `no_match_query`, the query parsing is\n+skipped altogether.\n+For instance, this feature is useful to prevent a query that runs\n+against multiple indices to fail because of a missing type.\n+See `has_child`, `has_parent`, `top_children` and `nested`.", "filename": "docs/reference/query-dsl/queries/indices-query.asciidoc", "status": "modified" }, { "diff": "@@ -70,9 +70,9 @@ public IndicesFilterBuilder filterName(String filterName) {\n @Override\n protected void doXContent(XContentBuilder builder, Params params) throws IOException {\n builder.startObject(IndicesFilterParser.NAME);\n+ builder.field(\"indices\", indices);\n builder.field(\"filter\");\n filterBuilder.toXContent(builder, params);\n- builder.field(\"indices\", indices);\n if (noMatchFilter != null) {\n builder.field(\"no_match_filter\");\n noMatchFilter.toXContent(builder, params);", "filename": "src/main/java/org/elasticsearch/index/query/IndicesFilterBuilder.java", "status": "modified" }, { "diff": "@@ -19,7 +19,6 @@\n \n package org.elasticsearch.index.query;\n \n-import com.google.common.collect.Sets;\n import org.apache.lucene.search.Filter;\n import org.elasticsearch.action.support.IgnoreIndices;\n import org.elasticsearch.cluster.ClusterService;\n@@ -31,7 +30,9 @@\n import org.elasticsearch.common.xcontent.XContentParser;\n \n import java.io.IOException;\n-import java.util.Set;\n+import java.util.ArrayList;\n+import java.util.Arrays;\n+import java.util.Collection;\n \n /**\n */\n@@ -57,46 +58,85 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n XContentParser parser = parseContext.parser();\n \n Filter filter = null;\n+ Filter noMatchFilter = Queries.MATCH_ALL_FILTER;\n+ Filter chosenFilter = null;\n boolean filterFound = false;\n- Set<String> indices = Sets.newHashSet();\n+ boolean indicesFound = false;\n+ boolean matchesConcreteIndices = false;\n \n String currentFieldName = null;\n XContentParser.Token token;\n- Filter noMatchFilter = Queries.MATCH_ALL_FILTER;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n } else if (token == XContentParser.Token.START_OBJECT) {\n if (\"filter\".equals(currentFieldName)) {\n filterFound = true;\n- filter = parseContext.parseInnerFilter();\n+ if (indicesFound) {\n+ // Because we know the indices, we can either skip, or parse and use the query\n+ if (matchesConcreteIndices) {\n+ filter = parseContext.parseInnerFilter();\n+ chosenFilter = filter;\n+ } else {\n+ parseContext.parser().skipChildren(); // skip the filter object without parsing it into a Filter\n+ }\n+ } else {\n+ // We do not know the indices, we must parse the query\n+ filter = parseContext.parseInnerFilter();\n+ }\n } else if (\"no_match_filter\".equals(currentFieldName)) {\n- noMatchFilter = parseContext.parseInnerFilter();\n+ if (indicesFound) {\n+ // Because we know the indices, we can either skip, or parse and use the query\n+ if (!matchesConcreteIndices) {\n+ noMatchFilter = parseContext.parseInnerFilter();\n+ chosenFilter = noMatchFilter;\n+ } else {\n+ parseContext.parser().skipChildren(); // skip the filter object without parsing it into a Filter\n+ }\n+ } else {\n+ // We do not know the indices, we must parse the query\n+ noMatchFilter 
= parseContext.parseInnerFilter();\n+ }\n } else {\n throw new QueryParsingException(parseContext.index(), \"[indices] filter does not support [\" + currentFieldName + \"]\");\n }\n } else if (token == XContentParser.Token.START_ARRAY) {\n if (\"indices\".equals(currentFieldName)) {\n+ if (indicesFound) {\n+ throw new QueryParsingException(parseContext.index(), \"[indices] indices already specified\");\n+ }\n+ indicesFound = true;\n+ Collection<String> indices = new ArrayList<String>();\n while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {\n String value = parser.textOrNull();\n if (value == null) {\n throw new QueryParsingException(parseContext.index(), \"No value specified for term filter\");\n }\n indices.add(value);\n }\n+ matchesConcreteIndices = matchesIndices(parseContext, getConcreteIndices(indices));\n } else {\n throw new QueryParsingException(parseContext.index(), \"[indices] filter does not support [\" + currentFieldName + \"]\");\n }\n } else if (token.isValue()) {\n if (\"index\".equals(currentFieldName)) {\n- indices.add(parser.text());\n+ if (indicesFound) {\n+ throw new QueryParsingException(parseContext.index(), \"[indices] indices already specified\");\n+ }\n+ indicesFound = true;\n+ matchesConcreteIndices = matchesIndices(parseContext, getConcreteIndices(Arrays.asList(parser.text())));\n } else if (\"no_match_filter\".equals(currentFieldName)) {\n String type = parser.text();\n if (\"all\".equals(type)) {\n noMatchFilter = Queries.MATCH_ALL_FILTER;\n } else if (\"none\".equals(type)) {\n noMatchFilter = Queries.MATCH_NO_FILTER;\n }\n+ if (indicesFound) {\n+ if (!matchesConcreteIndices) {\n+ chosenFilter = noMatchFilter;\n+ }\n+ }\n } else {\n throw new QueryParsingException(parseContext.index(), \"[indices] filter does not support [\" + currentFieldName + \"]\");\n }\n@@ -105,25 +145,38 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n if (!filterFound) {\n throw new QueryParsingException(parseContext.index(), \"[indices] requires 'filter' element\");\n }\n- if (indices.isEmpty()) {\n+ if (!indicesFound) {\n throw new QueryParsingException(parseContext.index(), \"[indices] requires 'indices' element\");\n }\n \n- if (filter == null) {\n- return null;\n+ if (chosenFilter == null) {\n+ // Indices were not provided before we encountered the queries, which we hence parsed\n+ // We must now make a choice\n+ if (matchesConcreteIndices) {\n+ chosenFilter = filter;\n+ } else {\n+ chosenFilter = noMatchFilter;\n+ }\n }\n \n+ return chosenFilter;\n+ }\n+\n+ protected String[] getConcreteIndices(Collection<String> indices) {\n String[] concreteIndices = indices.toArray(new String[indices.size()]);\n if (clusterService != null) {\n MetaData metaData = clusterService.state().metaData();\n concreteIndices = metaData.concreteIndices(indices.toArray(new String[indices.size()]), IgnoreIndices.MISSING, true);\n }\n+ return concreteIndices;\n+ }\n \n+ protected boolean matchesIndices(QueryParseContext parseContext, String[] concreteIndices) {\n for (String index : concreteIndices) {\n if (Regex.simpleMatch(index, parseContext.index().name())) {\n- return filter;\n+ return true;\n }\n }\n- return noMatchFilter;\n+ return false;\n }\n }", "filename": "src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java", "status": "modified" }, { "diff": "@@ -70,9 +70,9 @@ public IndicesQueryBuilder queryName(String queryName) {\n @Override\n protected void doXContent(XContentBuilder builder, Params params) throws IOException {\n 
builder.startObject(IndicesQueryParser.NAME);\n+ builder.field(\"indices\", indices);\n builder.field(\"query\");\n queryBuilder.toXContent(builder, params);\n- builder.field(\"indices\", indices);\n if (noMatchQuery != null) {\n builder.field(\"no_match_query\");\n noMatchQuery.toXContent(builder, params);", "filename": "src/main/java/org/elasticsearch/index/query/IndicesQueryBuilder.java", "status": "modified" }, { "diff": "@@ -19,20 +19,20 @@\n \n package org.elasticsearch.index.query;\n \n-import com.google.common.collect.Sets;\n import org.apache.lucene.search.Query;\n import org.elasticsearch.action.support.IgnoreIndices;\n import org.elasticsearch.cluster.ClusterService;\n import org.elasticsearch.cluster.metadata.MetaData;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.inject.Inject;\n-import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;\n import org.elasticsearch.common.lucene.search.Queries;\n import org.elasticsearch.common.regex.Regex;\n import org.elasticsearch.common.xcontent.XContentParser;\n \n import java.io.IOException;\n-import java.util.Set;\n+import java.util.ArrayList;\n+import java.util.Arrays;\n+import java.util.Collection;\n \n /**\n */\n@@ -58,47 +58,86 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n XContentParser parser = parseContext.parser();\n \n Query query = null;\n+ Query noMatchQuery = Queries.newMatchAllQuery();\n+ Query chosenQuery = null;\n boolean queryFound = false;\n- Set<String> indices = Sets.newHashSet();\n+ boolean indicesFound = false;\n+ boolean matchesConcreteIndices = false;\n String queryName = null;\n \n String currentFieldName = null;\n XContentParser.Token token;\n- Query noMatchQuery = Queries.newMatchAllQuery();\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n } else if (token == XContentParser.Token.START_OBJECT) {\n if (\"query\".equals(currentFieldName)) {\n- query = parseContext.parseInnerQuery();\n queryFound = true;\n+ if (indicesFound) {\n+ // Because we know the indices, we can either skip, or parse and use the query\n+ if (matchesConcreteIndices) {\n+ query = parseContext.parseInnerQuery();\n+ chosenQuery = query;\n+ } else {\n+ parseContext.parser().skipChildren(); // skip the query object without parsing it into a Query\n+ }\n+ } else {\n+ // We do not know the indices, we must parse the query\n+ query = parseContext.parseInnerQuery();\n+ }\n } else if (\"no_match_query\".equals(currentFieldName)) {\n- noMatchQuery = parseContext.parseInnerQuery();\n+ if (indicesFound) {\n+ // Because we know the indices, we can either skip, or parse and use the query\n+ if (!matchesConcreteIndices) {\n+ noMatchQuery = parseContext.parseInnerQuery();\n+ chosenQuery = noMatchQuery;\n+ } else {\n+ parseContext.parser().skipChildren(); // skip the query object without parsing it into a Query\n+ }\n+ } else {\n+ // We do not know the indices, we must parse the query\n+ noMatchQuery = parseContext.parseInnerQuery();\n+ }\n } else {\n throw new QueryParsingException(parseContext.index(), \"[indices] query does not support [\" + currentFieldName + \"]\");\n }\n } else if (token == XContentParser.Token.START_ARRAY) {\n if (\"indices\".equals(currentFieldName)) {\n+ if (indicesFound) {\n+ throw new QueryParsingException(parseContext.index(), \"[indices] indices already specified\");\n+ }\n+ indicesFound = true;\n+ Collection<String> indices = new 
ArrayList<String>();\n while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {\n String value = parser.textOrNull();\n if (value == null) {\n throw new QueryParsingException(parseContext.index(), \"No value specified for term filter\");\n }\n indices.add(value);\n }\n+ matchesConcreteIndices = matchesIndices(parseContext, getConcreteIndices(indices));\n } else {\n throw new QueryParsingException(parseContext.index(), \"[indices] query does not support [\" + currentFieldName + \"]\");\n }\n } else if (token.isValue()) {\n if (\"index\".equals(currentFieldName)) {\n- indices.add(parser.text());\n+ if (indicesFound) {\n+ throw new QueryParsingException(parseContext.index(), \"[indices] indices already specified\");\n+ }\n+ indicesFound = true;\n+ matchesConcreteIndices = matchesIndices(parseContext, getConcreteIndices(Arrays.asList(parser.text())));\n } else if (\"no_match_query\".equals(currentFieldName)) {\n String type = parser.text();\n if (\"all\".equals(type)) {\n noMatchQuery = Queries.newMatchAllQuery();\n } else if (\"none\".equals(type)) {\n noMatchQuery = Queries.newMatchNoDocsQuery();\n }\n+ if (indicesFound) {\n+ if (!matchesConcreteIndices) {\n+ chosenQuery = noMatchQuery;\n+ }\n+ }\n } else if (\"_name\".equals(currentFieldName)) {\n queryName = parser.text();\n } else {\n@@ -109,30 +148,42 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n if (!queryFound) {\n throw new QueryParsingException(parseContext.index(), \"[indices] requires 'query' element\");\n }\n- if (query == null) {\n- return null;\n- }\n- if (indices.isEmpty()) {\n+ if (!indicesFound) {\n throw new QueryParsingException(parseContext.index(), \"[indices] requires 'indices' element\");\n }\n \n+ if (chosenQuery == null) {\n+ // Indices were not provided before we encountered the queries, which we hence parsed\n+ // We must now make a choice\n+ if (matchesConcreteIndices) {\n+ chosenQuery = query;\n+ } else {\n+ chosenQuery = noMatchQuery;\n+ }\n+ }\n+\n+ if (queryName != null && chosenQuery != null) {\n+ parseContext.addNamedQuery(queryName, chosenQuery);\n+ }\n+\n+ return chosenQuery;\n+ }\n+\n+ protected String[] getConcreteIndices(Collection<String> indices) {\n String[] concreteIndices = indices.toArray(new String[indices.size()]);\n if (clusterService != null) {\n MetaData metaData = clusterService.state().metaData();\n concreteIndices = metaData.concreteIndices(indices.toArray(new String[indices.size()]), IgnoreIndices.MISSING, true);\n }\n+ return concreteIndices;\n+ }\n \n+ protected boolean matchesIndices(QueryParseContext parseContext, String[] concreteIndices) {\n for (String index : concreteIndices) {\n if (Regex.simpleMatch(index, parseContext.index().name())) {\n- if (queryName != null) {\n- parseContext.addNamedQuery(queryName, query);\n- }\n- return query;\n+ return true;\n }\n }\n- if (queryName != null) {\n- parseContext.addNamedQuery(queryName, noMatchQuery);\n- }\n- return noMatchQuery;\n+ return false;\n }\n }", "filename": "src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java", "status": "modified" }, { "diff": "@@ -26,6 +26,8 @@\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.search.SearchType;\n+import org.elasticsearch.action.search.ShardSearchFailure;\n+import org.elasticsearch.common.Priority;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import 
org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n@@ -35,8 +37,10 @@\n import org.elasticsearch.rest.RestStatus;\n import org.elasticsearch.search.SearchHit;\n import org.elasticsearch.search.SearchHits;\n+import org.elasticsearch.search.SearchParseException;\n import org.elasticsearch.search.facet.FacetBuilders;\n import org.elasticsearch.test.AbstractIntegrationTest;\n+import org.elasticsearch.transport.RemoteTransportException;\n import org.joda.time.DateTime;\n import org.joda.time.DateTimeZone;\n import org.joda.time.format.ISODateTimeFormat;\n@@ -1890,4 +1894,179 @@ public void testMultiMatchLenientIssue3797() {\n .setQuery(QueryBuilders.multiMatchQuery(\"value2\", \"field2^2\").lenient(true)).get();\n assertHitCount(searchResponse, 1l);\n }\n+\n+ @Test\n+ public void testIndicesQuery() throws Exception {\n+ createIndex(\"index1\", \"index2\");\n+ ensureGreen();\n+\n+ client().prepareIndex(\"index1\", \"type1\").setId(\"1\").setSource(\"text\", \"value\").get();\n+ client().prepareIndex(\"index2\", \"type2\").setId(\"2\").setSource(\"text\", \"value\").get();\n+ refresh();\n+\n+ SearchResponse response = client().prepareSearch(\"index1\", \"index2\")\n+ .setQuery(indicesQuery(matchQuery(\"text\", \"value\"), \"index1\")\n+ .noMatchQuery(matchQuery(\"text\", \"value\"))).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"index1\", \"index2\")\n+ .setQuery(indicesQuery(matchQuery(\"text\", \"value\"), \"index1\")).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"index1\", \"index2\")\n+ .setQuery(indicesQuery(matchQuery(\"text\", \"value\"), \"index1\")\n+ .noMatchQuery(\"all\")).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"index1\", \"index2\")\n+ .setQuery(indicesQuery(matchQuery(\"text\", \"value\"), \"index1\")\n+ .noMatchQuery(\"none\")).get();\n+ assertHitCount(response, 1l);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"1\"));\n+ }\n+\n+ @Test\n+ public void testIndicesFilter() throws Exception {\n+ createIndex(\"index1\", \"index2\");\n+ ensureGreen();\n+\n+ client().prepareIndex(\"index1\", \"type1\").setId(\"1\").setSource(\"text\", \"value\").get();\n+ client().prepareIndex(\"index2\", \"type2\").setId(\"2\").setSource(\"text\", \"value\").get();\n+ refresh();\n+\n+ SearchResponse response = client().prepareSearch(\"index1\", \"index2\")\n+ .setFilter(indicesFilter(termFilter(\"text\", \"value\"), \"index1\")\n+ .noMatchFilter(termFilter(\"text\", \"value\"))).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"index1\", \"index2\")\n+ .setFilter(indicesFilter(termFilter(\"text\", \"value\"), \"index1\")).get();\n+ 
assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"index1\", \"index2\")\n+ .setFilter(indicesFilter(termFilter(\"text\", \"value\"), \"index1\")\n+ .noMatchFilter(\"all\")).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"index1\", \"index2\")\n+ .setFilter(indicesFilter(termFilter(\"text\", \"value\"), \"index1\")\n+ .noMatchFilter(\"none\")).get();\n+ assertHitCount(response, 1l);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"1\"));\n+ }\n+\n+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416\n+ public void testIndicesQueryHideParsingExceptions() throws Exception {\n+ client().admin().indices().prepareCreate(\"simple\")\n+ .addMapping(\"lone\", jsonBuilder().startObject().startObject(\"lone\").endObject().endObject())\n+ .get();\n+ client().admin().indices().prepareCreate(\"related\")\n+ .addMapping(\"parent\", jsonBuilder().startObject().startObject(\"parent\").endObject().endObject())\n+ .addMapping(\"child\", jsonBuilder().startObject().startObject(\"child\").startObject(\"_parent\").field(\"type\", \"parent\")\n+ .endObject().endObject().endObject())\n+ .get();\n+ ensureGreen();\n+\n+ client().prepareIndex(\"simple\", \"lone\").setId(\"1\").setSource(\"text\", \"value\").get();\n+ client().prepareIndex(\"related\", \"parent\").setId(\"2\").setSource(\"text\", \"parent\").get();\n+ client().prepareIndex(\"related\", \"child\").setId(\"3\").setParent(\"2\").setSource(\"text\", \"value\").get();\n+ refresh();\n+\n+ SearchResponse response = client().prepareSearch(\"related\")\n+ .setQuery(hasChildQuery(\"child\", matchQuery(\"text\", \"value\"))).get();\n+ assertHitCount(response, 1l);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"2\"));\n+\n+ response = client().prepareSearch(\"simple\")\n+ .setQuery(matchQuery(\"text\", \"value\")).get();\n+ assertHitCount(response, 1l);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"1\"));\n+\n+ try {\n+ client().prepareSearch(\"simple\")\n+ .setQuery(hasChildQuery(\"child\", matchQuery(\"text\", \"value\"))).get();\n+ fail(\"Should have failed with a SearchPhaseExecutionException because all shards failed with a nested QueryParsingException\");\n+ // If no failure happens, the HasChildQuery may have changed behavior when provided with wrong types\n+ } catch (SearchPhaseExecutionException e) {\n+ // There is no easy way to ensure we got a QueryParsingException\n+ }\n+\n+ response = client().prepareSearch(\"related\", \"simple\")\n+ .setQuery(indicesQuery(matchQuery(\"text\", \"parent\"), \"related\")\n+ .noMatchQuery(matchQuery(\"text\", \"value\"))).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"related\", \"simple\")\n+ .setQuery(indicesQuery(hasChildQuery(\"child\", matchQuery(\"text\", \"value\")), \"related\")\n+ .noMatchQuery(matchQuery(\"text\", \"value\"))).get();\n+ assertHitCount(response, 2l);\n+ 
assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ }\n+\n+\n+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416\n+ public void testIndicesFilterHideParsingExceptions() throws Exception {\n+ client().admin().indices().prepareCreate(\"simple\")\n+ .addMapping(\"lone\", jsonBuilder().startObject().startObject(\"lone\").endObject().endObject())\n+ .get();\n+ client().admin().indices().prepareCreate(\"related\")\n+ .addMapping(\"parent\", jsonBuilder().startObject().startObject(\"parent\").endObject().endObject())\n+ .addMapping(\"child\", jsonBuilder().startObject().startObject(\"child\").startObject(\"_parent\").field(\"type\", \"parent\")\n+ .endObject().endObject().endObject())\n+ .get();\n+ ensureGreen();\n+\n+ client().prepareIndex(\"simple\", \"lone\").setId(\"1\").setSource(\"text\", \"value\").get();\n+ client().prepareIndex(\"related\", \"parent\").setId(\"2\").setSource(\"text\", \"parent\").get();\n+ client().prepareIndex(\"related\", \"child\").setId(\"3\").setParent(\"2\").setSource(\"text\", \"value\").get();\n+ refresh();\n+\n+ SearchResponse response = client().prepareSearch(\"related\")\n+ .setFilter(hasChildFilter(\"child\", termFilter(\"text\", \"value\"))).get();\n+ assertHitCount(response, 1l);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"2\"));\n+\n+ response = client().prepareSearch(\"simple\")\n+ .setFilter(termFilter(\"text\", \"value\")).get();\n+ assertHitCount(response, 1l);\n+ assertThat(response.getHits().getAt(0).getId(), equalTo(\"1\"));\n+\n+ try {\n+ client().prepareSearch(\"simple\")\n+ .setFilter(hasChildFilter(\"child\", termFilter(\"text\", \"value\"))).get();\n+ fail(\"Should have failed with a SearchPhaseExecutionException because all shards failed with a nested QueryParsingException\");\n+ // If no failure happens, the HasChildQuery may have changed behavior when provided with wrong types\n+ } catch (SearchPhaseExecutionException e) {\n+ // There is no easy way to ensure we got a QueryParsingException\n+ }\n+\n+ response = client().prepareSearch(\"related\", \"simple\")\n+ .setFilter(indicesFilter(termFilter(\"text\", \"parent\"), \"related\")\n+ .noMatchFilter(termFilter(\"text\", \"value\"))).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+\n+ response = client().prepareSearch(\"related\", \"simple\")\n+ .setFilter(indicesFilter(hasChildFilter(\"child\", termFilter(\"text\", \"value\")), \"related\")\n+ .noMatchFilter(termFilter(\"text\", \"value\"))).get();\n+ assertHitCount(response, 2l);\n+ assertThat(response.getHits().getAt(0).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ assertThat(response.getHits().getAt(1).getId(), either(equalTo(\"1\")).or(equalTo(\"2\")));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
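Since the effect of this change depends on field order in the generated JSON, a small standalone sketch may help. It only prints the JSON produced by the updated builder and does not talk to a cluster; the class name is illustrative, while the field values mirror the example in the added documentation.

```
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.FilterBuilders;
import org.elasticsearch.index.query.IndicesFilterBuilder;

// Hedged sketch: after this change the builder writes "indices" before "filter", which
// is the ordering the new documentation recommends so that the inner filter can be
// skipped entirely on indices it does not apply to.
public class IndicesFilterOrderingSketch {
    public static void main(String[] args) throws Exception {
        IndicesFilterBuilder filter = FilterBuilders.indicesFilter(
                        FilterBuilders.termFilter("tag", "wow"), "index1", "index2")
                .noMatchFilter(FilterBuilders.termFilter("tag", "kow"));

        XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
        filter.toXContent(builder, ToXContent.EMPTY_PARAMS);
        System.out.println(builder.string());   // "indices" now appears before "filter"
    }
}
```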
{ "body": "`FetchSubPhase.HitContext` exposes a cache that allows to reuse objects which can be shared between different hits, when executing subfetch phases. \n\nThis is meant to be used especially in combination with highlighting, as some of the objects are heavy to create and don't need to be recreated per hit.\n\nThis mechanism is not working properly, as `HitContext` gets recreated per hit, thus the cache is always empty and the objects are never reused.\n", "comments": [], "number": 4106, "title": "Highlight hit object cache gets reset per hit" }
{ "body": "Fixed fetch subphase to not recreate the HitContext for each hit, so that the object cache stays the same\n\nFixed also bug in the fast vector highlighter which was raised by enabling the object cache, due to null FieldQuery (NPE) in case the objects are taken from the cache\n\nAdded tests to check if there are issues when highlighting multiple fields at the same time\n\nCloses #4106\n", "number": 4107, "review_comments": [], "title": "Fixed fetch subphase to not recreate the HitContext for each hit" }
{ "commits": [ { "message": "Fixed fetch subphase to not recreate the HitContext for each hit, so that the object cache stays the same\n\nFixed also bug in the fast vector highlighter which was raised by enabling the object cache, due to null FieldQuery (NPE) in case the objects are taken from the cache\n\nAdded tests to check if there are issues when highlighting multiple fields at the same time\n\nCloses #4106" }, { "message": "Trimmed down HighlighterSearchTests\n\nUsed common assertHighlight and concise mapping whenever possible\nMore indexRandom too" } ], "files": [ { "diff": "@@ -142,6 +142,7 @@ public void execute(SearchContext context) {\n }\n \n InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];\n+ FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();\n for (int index = 0; index < context.docIdsToLoadSize(); index++) {\n int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];\n \n@@ -195,10 +196,9 @@ public void execute(SearchContext context) {\n }\n }\n \n+ hitContext.reset(searchHit, subReaderContext, subDoc, context.searcher().getIndexReader(), docId, fieldsVisitor);\n for (FetchSubPhase fetchSubPhase : fetchSubPhases) {\n- FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();\n if (fetchSubPhase.hitExecutionNeeded(context)) {\n- hitContext.reset(searchHit, subReaderContext, subDoc, context.searcher().getIndexReader(), docId, fieldsVisitor);\n fetchSubPhase.hitExecute(context, hitContext);\n }\n }", "filename": "src/main/java/org/elasticsearch/search/fetch/FetchPhase.java", "status": "modified" }, { "diff": "@@ -76,8 +76,22 @@ public HighlightField highlight(HighlighterContext highlighterContext) {\n HighlighterEntry cache = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);\n \n try {\n+ FieldQuery fieldQuery;\n+ if (field.requireFieldMatch()) {\n+ if (cache.fieldMatchFieldQuery == null) {\n+ // we use top level reader to rewrite the query against all readers, with use caching it across hits (and across readers...)\n+ cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.highlightQuery, hitContext.topLevelReader(), true, field.requireFieldMatch());\n+ }\n+ fieldQuery = cache.fieldMatchFieldQuery;\n+ } else {\n+ if (cache.noFieldMatchFieldQuery == null) {\n+ // we use top level reader to rewrite the query against all readers, with use caching it across hits (and across readers...)\n+ cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.highlightQuery, hitContext.topLevelReader(), true, field.requireFieldMatch());\n+ }\n+ fieldQuery = cache.noFieldMatchFieldQuery;\n+ }\n+\n MapperHighlightEntry entry = cache.mappers.get(mapper);\n- FieldQuery fieldQuery = null;\n if (entry == null) {\n FragListBuilder fragListBuilder;\n BaseFragmentsBuilder fragmentsBuilder;\n@@ -122,19 +136,6 @@ public HighlightField highlight(HighlighterContext highlighterContext) {\n cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();\n }\n CustomFieldQuery.highlightFilters.set(field.highlightFilter());\n- if (field.requireFieldMatch()) {\n- if (cache.fieldMatchFieldQuery == null) {\n- // we use top level reader to rewrite the query against all readers, with use caching it across hits (and across readers...)\n- cache.fieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.highlightQuery, hitContext.topLevelReader(), true, field.requireFieldMatch());\n- }\n- fieldQuery = cache.fieldMatchFieldQuery;\n- } else {\n- if (cache.noFieldMatchFieldQuery == null) {\n- // we 
use top level reader to rewrite the query against all readers, with use caching it across hits (and across readers...)\n- cache.noFieldMatchFieldQuery = new CustomFieldQuery(highlighterContext.highlightQuery, hitContext.topLevelReader(), true, field.requireFieldMatch());\n- }\n- fieldQuery = cache.noFieldMatchFieldQuery;\n- }\n cache.mappers.put(mapper, entry);\n }\n ", "filename": "src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java", "status": "modified" }, { "diff": "", "filename": "src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java", "status": "modified" }, { "diff": "@@ -200,15 +200,27 @@ public static void assertHighlight(SearchResponse resp, int hit, String field, i\n assertHighlight(resp, hit, field, fragment, equalTo(totalFragments), matcher);\n }\n \n+ public static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<String> matcher) {\n+ assertHighlight(hit, field, fragment, greaterThan(fragment), matcher);\n+ }\n+\n+ public static void assertHighlight(SearchHit hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {\n+ assertHighlight(hit, field, fragment, equalTo(totalFragments), matcher);\n+ }\n+\n private static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {\n assertNoFailures(resp);\n assertThat(\"not enough hits\", resp.getHits().hits().length, greaterThan(hit));\n- assertThat(resp.getHits().hits()[hit].getHighlightFields(), hasKey(field));\n- assertThat(resp.getHits().hits()[hit].getHighlightFields().get(field).fragments().length, fragmentsMatcher);\n- assertThat(resp.getHits().hits()[hit].highlightFields().get(field).fragments()[fragment].string(), matcher);\n+ assertHighlight(resp.getHits().hits()[hit], field, fragment, fragmentsMatcher, matcher);\n assertVersionSerializable(resp);\n }\n \n+ private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {\n+ assertThat(hit.getHighlightFields(), hasKey(field));\n+ assertThat(hit.getHighlightFields().get(field).fragments().length, fragmentsMatcher);\n+ assertThat(hit.highlightFields().get(field).fragments()[fragment].string(), matcher);\n+ }\n+\n public static void assertNotHighlighted(SearchResponse resp, int hit, String field) {\n assertNoFailures(resp);\n assertThat(\"not enough hits\", resp.getHits().hits().length, greaterThan(hit));", "filename": "src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java", "status": "modified" } ] }
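The reason hoisting the HitContext out of the per-hit loop matters is simply that a per-hit cache only pays off if every hit sees the same map instance. A deliberately generic, runnable illustration follows; none of the names below are Elasticsearch classes, they only stand in for the pattern.

```
import java.util.HashMap;
import java.util.Map;

// Generic illustration of the bug above: if the cache map is recreated per hit,
// get() never returns a previously stored value and the expensive object (for the
// fast vector highlighter, the rewritten FieldQuery) is rebuilt for every single hit.
public class PerHitCacheSketch {
    static Object getOrCompute(Map<Object, Object> cache, Object key) {
        Object value = cache.get(key);
        if (value == null) {
            value = new Object();   // stands in for an expensive-to-build object
            cache.put(key, value);
        }
        return value;
    }

    public static void main(String[] args) {
        Map<Object, Object> cache = new HashMap<Object, Object>();  // created once, outside the hit loop
        Object firstHit = getOrCompute(cache, "fvh");
        Object secondHit = getOrCompute(cache, "fvh");
        System.out.println(firstHit == secondHit);   // true: the second hit reuses the cached object
    }
}
```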
{ "body": "0.90.6 needs a lot more direct memory compared to 0.90.5 when using an in memory store for tests.\n\nSetting needed for 0.90.6\n\n```\n-XX:MaxDirectMemorySize=4608m\n```\n\nSetting needed for 0.90.5\n\n```\n-XX:MaxDirectMemorySize=512m\n```\n", "comments": [ { "body": "Hiya\n\nYes, this is a known issue, see #4078 \n\nWe will be releasing version 0.90.7 soon, which will fix this. In the meantime, you can add this to your config:\n\n```\nindex.warmer.enabled: false\n```\n", "created_at": "2013-11-05T17:11:17Z" }, { "body": "(at least I assume this is the same thing)\n\nCould you try the setting above and let me know if it fixed the issue?\n\nthanks\n", "created_at": "2013-11-05T17:19:54Z" }, { "body": "No, that setting does not fix the issue.\n", "created_at": "2013-11-05T17:30:54Z" }, { "body": "Could you give us some idea of what your tests do and how you configure elasticsearch?\n", "created_at": "2013-11-05T17:33:41Z" }, { "body": "stacktrace\n\n```\njava.lang.OutOfMemoryError: Direct buffer memory\n at java.nio.Bits.reserveMemory(Bits.java:658)\n at java.nio.DirectByteBuffer.<init>(DirectByteBuffer.java:123)\n at java.nio.ByteBuffer.allocateDirect(ByteBuffer.java:306)\n at org.apache.lucene.store.bytebuffer.PlainByteBufferAllocator.allocate(PlainByteBufferAllocator.java:55)\n at org.apache.lucene.store.bytebuffer.CachingByteBufferAllocator.allocate(CachingByteBufferAllocator.java:52)\n at org.elasticsearch.cache.memory.ByteBufferCache.allocate(ByteBufferCache.java:101)\n at org.apache.lucene.store.bytebuffer.ByteBufferIndexOutput.switchCurrentBuffer(ByteBufferIndexOutput.java:106)\n at org.apache.lucene.store.bytebuffer.ByteBufferIndexOutput.writeBytes(ByteBufferIndexOutput.java:93)\n at org.elasticsearch.common.lucene.store.BufferedChecksumIndexOutput.flushBuffer(BufferedChecksumIndexOutput.java:69)\n at org.apache.lucene.store.BufferedIndexOutput.flushBuffer(BufferedIndexOutput.java:113)\n at org.apache.lucene.store.BufferedIndexOutput.flush(BufferedIndexOutput.java:102)\n at org.elasticsearch.common.lucene.store.BufferedChecksumIndexOutput.flush(BufferedChecksumIndexOutput.java:80)\n at org.apache.lucene.store.BufferedIndexOutput.close(BufferedIndexOutput.java:126)\n at org.elasticsearch.common.lucene.store.BufferedChecksumIndexOutput.close(BufferedChecksumIndexOutput.java:60)\n at org.elasticsearch.index.store.Store$StoreIndexOutput.close(Store.java:587)\n at org.apache.lucene.util.IOUtils.close(IOUtils.java:140)\n at org.apache.lucene.codecs.lucene41.Lucene41PostingsWriter.close(Lucene41PostingsWriter.java:582)\n at org.apache.lucene.util.IOUtils.closeWhileHandlingException(IOUtils.java:81)\n at org.apache.lucene.codecs.BlockTreeTermsWriter.close(BlockTreeTermsWriter.java:1082)\n at org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat$BloomFilteredFieldsConsumer.close(BloomFilterPostingsFormat.java:408)\n at org.elasticsearch.index.codec.postingsformat.ElasticSearch090PostingsFormat$1.close(ElasticSearch090PostingsFormat.java:63)\n at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsConsumerAndSuffix.close(PerFieldPostingsFormat.java:86)\n at org.apache.lucene.util.IOUtils.close(IOUtils.java:163)\n at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsWriter.close(PerFieldPostingsFormat.java:154)\n at org.apache.lucene.util.IOUtils.close(IOUtils.java:140)\n at org.apache.lucene.index.FreqProxTermsWriter.flush(FreqProxTermsWriter.java:102)\n at org.apache.lucene.index.TermsHash.flush(TermsHash.java:116)\n at 
org.apache.lucene.index.DocInverter.flush(DocInverter.java:53)\n at org.apache.lucene.index.DocFieldProcessor.flush(DocFieldProcessor.java:81)\n at org.apache.lucene.index.DocumentsWriterPerThread.flush(DocumentsWriterPerThread.java:466)\n at org.apache.lucene.index.DocumentsWriter.doFlush(DocumentsWriter.java:499)\n at org.apache.lucene.index.DocumentsWriter.flushAllThreads(DocumentsWriter.java:609)\n at org.apache.lucene.index.IndexWriter.getReader(IndexWriter.java:367)\n at org.apache.lucene.index.StandardDirectoryReader.doOpenFromWriter(StandardDirectoryReader.java:277)\n at org.apache.lucene.index.StandardDirectoryReader.doOpenIfChanged(StandardDirectoryReader.java:252)\n at org.apache.lucene.index.StandardDirectoryReader.doOpenIfChanged(StandardDirectoryReader.java:242)\n at org.apache.lucene.index.DirectoryReader.openIfChanged(DirectoryReader.java:170)\n at org.apache.lucene.search.SearcherManager.refreshIfNeeded(SearcherManager.java:118)\n at org.apache.lucene.search.SearcherManager.refreshIfNeeded(SearcherManager.java:58)\n at org.apache.lucene.search.ReferenceManager.doMaybeRefresh(ReferenceManager.java:155)\n at org.apache.lucene.search.ReferenceManager.maybeRefresh(ReferenceManager.java:204)\n at org.elasticsearch.index.engine.robin.RobinEngine.refresh(RobinEngine.java:786)\n at org.elasticsearch.index.shard.service.InternalIndexShard.refresh(InternalIndexShard.java:448)\n at org.elasticsearch.action.index.TransportIndexAction.shardOperationOnPrimary(TransportIndexAction.java:228)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$AsyncShardOperationAction.performOnPrimary(TransportShardReplicationOperationAction.java:556)\n at org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction$AsyncShardOperationAction$1.run(TransportShardReplicationOperationAction.java:426)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:724)\n```\n", "created_at": "2013-11-05T17:34:07Z" }, { "body": "configuration\n\n```\n final Node node = NodeBuilder.nodeBuilder()\n .local(true)\n .clusterName(clusterName)\n .settings(ImmutableSettings.settingsBuilder()\n .put(\"index.store.type\", \"memory\")\n .put(\"index.number_of_shards\", \"1\")\n .put(\"index.number_of_replicas\", \"0\")\n .put(\"gateway.type\", \"none\")\n .put(\"http.enabled\", false)\n .put(\"index.warmer.enabled\", false)\n .put(\"path.data\", \"target/es\")\n .put(\"path.logs\", \"target/es\")\n .put(\"path.work\", \"target/es\")\n .build())\n .build();\n```\n", "created_at": "2013-11-05T17:36:28Z" }, { "body": "can you maybe write isolate it to a test case that shows that it passes in 0.90.5, while it fails in 0.90.6 (with the `index.warmer.enabled` set to `false`)? 
it would be a great help in chasing it down...\n", "created_at": "2013-11-05T17:39:49Z" }, { "body": "```\nimport org.elasticsearch.client.Client;\nimport org.elasticsearch.common.settings.ImmutableSettings;\nimport org.elasticsearch.node.Node;\nimport org.elasticsearch.node.NodeBuilder;\n\nimport java.util.UUID;\n\npublic class Issue {\n public static void main(final String[] args) {\n final Node node = NodeBuilder.nodeBuilder()\n .local(true)\n .clusterName(\"test\")\n .settings(ImmutableSettings.settingsBuilder()\n .put(\"index.store.type\", \"memory\")\n .put(\"index.number_of_shards\", \"1\")\n .put(\"index.number_of_replicas\", \"0\")\n .put(\"gateway.type\", \"none\")\n .put(\"http.enabled\", false)\n .put(\"index.warmer.enabled\", false)\n .build())\n .build();\n node.start();\n final Client client = node.client();\n final byte[] source = \"{\\\"a\\\":\\\"a1\\\"}\".getBytes();\n for (int i = 0; i < 1000; i++) {\n final String id = UUID.randomUUID().toString();\n client.prepareIndex(\"index1\", \"type1\", id)\n .setSource(source)\n .setRefresh(true)\n .execute()\n .actionGet();\n }\n }\n}\n```\n", "created_at": "2013-11-05T18:14:26Z" }, { "body": "@s1monw and myself chased it up, seems like a regression in Lucene, opened an issue: https://issues.apache.org/jira/browse/LUCENE-5330 and already have a patch for it: https://issues.apache.org/jira/secure/attachment/12612263/LUCENE-5330.\n", "created_at": "2013-11-05T22:24:33Z" }, { "body": "thanks for looking into this\n", "created_at": "2013-11-05T22:58:44Z" } ], "number": 4093, "title": "0.90.6 memory issue" }
{ "body": "This is a workaround for LUCENE-5330 to prune the event queue on getReader()\n\nCloses #4093\n", "number": 4104, "review_comments": [], "title": "Apply fix for LUCENE-5330 pruning the IndexWriter queue to get rid of pending events." }
{ "commits": [ { "message": "Apply fix for LUCENE-5330 pruning the IndexWriter queue to get rid of pending event\n\nCloses #4093" } ], "files": [ { "diff": "@@ -0,0 +1,75 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.lucene.index;\n+\n+import org.apache.lucene.store.Directory;\n+import org.apache.lucene.util.Version;\n+\n+import java.io.IOException;\n+import java.lang.reflect.Method;\n+\n+public final class XIndexWriter extends IndexWriter {\n+\n+ private static final Method processEvents;\n+\n+\n+ static {\n+ // fix for https://issues.apache.org/jira/browse/LUCENE-5330\n+ assert Version.LUCENE_45.onOrAfter(org.elasticsearch.Version.CURRENT.luceneVersion) : \"This should be fixed in LUCENE-4.6\";\n+ try {\n+ processEvents = IndexWriter.class.getDeclaredMethod(\"processEvents\", boolean.class, boolean.class);\n+ processEvents.setAccessible(true);\n+ } catch (NoSuchMethodException e) {\n+ throw new RuntimeException(e);\n+ }\n+ }\n+\n+ public XIndexWriter(Directory d, IndexWriterConfig conf) throws IOException {\n+ super(d, conf);\n+ }\n+\n+ private void processEvents() {\n+ try {\n+ processEvents.invoke(this, false, true);\n+ } catch (Exception e) {\n+ throw new RuntimeException(e);\n+ }\n+ }\n+\n+ @Override\n+ public void rollback() throws IOException {\n+ super.rollback();\n+ processEvents();\n+ }\n+\n+ @Override\n+ public void close(boolean waitForMerges) throws IOException {\n+ super.close(waitForMerges);\n+ processEvents();\n+ }\n+\n+ @Override\n+ DirectoryReader getReader(boolean applyAllDeletes) throws IOException {\n+ DirectoryReader reader = super.getReader(applyAllDeletes);\n+ processEvents();\n+ return reader;\n+ }\n+\n+}\n\\ No newline at end of file", "filename": "src/main/java/org/apache/lucene/index/XIndexWriter.java", "status": "added" }, { "diff": "@@ -1361,7 +1361,7 @@ public void warm(AtomicReader reader) throws IOException {\n }\n }\n });\n- return new IndexWriter(store.directory(), config);\n+ return new XIndexWriter(store.directory(), config);\n } catch (LockObtainFailedException ex) {\n boolean isLocked = IndexWriter.isLocked(store.directory());\n logger.warn(\"Could not lock IndexWriter isLocked [{}]\", ex, isLocked);\n@@ -1605,4 +1605,5 @@ synchronized int awaitNoRecoveries(long timeout) throws InterruptedException {\n return ongoingRecoveries;\n }\n }\n+\n }", "filename": "src/main/java/org/elasticsearch/index/engine/robin/RobinEngine.java", "status": "modified" }, { "diff": "@@ -25,11 +25,16 @@\n import org.elasticsearch.action.admin.indices.segments.ShardSegments;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.index.engine.Segment;\n+import org.elasticsearch.index.query.QueryBuilders;\n import 
org.elasticsearch.test.AbstractIntegrationTest;\n import org.hamcrest.Matchers;\n import org.junit.Test;\n \n import java.util.Collection;\n+import java.util.UUID;\n+\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;\n \n public class RobinEngineIntegrationTest extends AbstractIntegrationTest {\n \n@@ -75,4 +80,25 @@ private void assertTotalCompoundSegments(int i, int t, String index) {\n assertThat(total, Matchers.equalTo(t));\n \n }\n+ @Test\n+ public void test4093() {\n+ assertAcked(prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder()\n+ .put(\"index.store.type\", \"memory\")\n+ .put(\"index.number_of_shards\", \"1\")\n+ .put(\"index.number_of_replicas\", \"0\")\n+ .put(\"gateway.type\", \"none\")\n+ .put(\"http.enabled\", false)\n+ .put(RobinEngine.INDEX_COMPOUND_ON_FLUSH, randomBoolean())\n+ .put(\"index.warmer.enabled\", false)\n+ .build()).get());\n+ final int iters = between(500, 1000);\n+ for (int i = 0; i < iters; i++) {\n+ client().prepareIndex(\"test\", \"type1\")\n+ .setSource(\"a\", \"\" + i)\n+ .setRefresh(true)\n+ .execute()\n+ .actionGet();\n+ }\n+ assertHitCount(client().prepareCount(\"test\").setQuery(QueryBuilders.matchAllQuery()).get(), iters);\n+ }\n }", "filename": "src/test/java/org/elasticsearch/index/engine/robin/RobinEngineIntegrationTest.java", "status": "modified" } ] }
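The interesting part of `XIndexWriter` is how it reaches the non-public `IndexWriter#processEvents(boolean, boolean)` method. Below is a hedged, self-contained sketch of that reflection pattern; the target class is a stand-in that exists only to keep the example runnable, not Lucene's IndexWriter.

```
import java.lang.reflect.Method;

// Hedged sketch: resolve the non-public method once in a static initializer, make it
// accessible, then invoke it after the operations that may have queued events. This
// mirrors the shape of the XIndexWriter workaround above without depending on Lucene.
public class ReflectiveDrainSketch {
    static class FakeWriter {                       // stand-in for IndexWriter
        private int drained;
        private void processEvents(boolean triggerMerge, boolean forcePurge) {
            drained++;                              // pretend to prune the event queue
        }
    }

    private static final Method PROCESS_EVENTS;
    static {
        try {
            PROCESS_EVENTS = FakeWriter.class.getDeclaredMethod("processEvents", boolean.class, boolean.class);
            PROCESS_EVENTS.setAccessible(true);
        } catch (NoSuchMethodException e) {
            throw new RuntimeException(e);
        }
    }

    public static void main(String[] args) throws Exception {
        FakeWriter writer = new FakeWriter();
        PROCESS_EVENTS.invoke(writer, false, true); // same call shape as the workaround
        System.out.println(writer.drained);         // prints 1
    }
}
```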
{ "body": "```\ncurl -XDELETE localhost:9200/test\ncurl -XPUT localhost:9200/test/test/1 -d '{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }'\ncurl localhost:9200/test/test/1/_source\n```\n\nReturns `{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }` as expected.\n\n```\ncurl -XDELETE localhost:9200/test\ncurl -XPUT localhost:9200/test\ncurl -XPUT localhost:9200/test/test/_mapping -d '{ \"test\": { \"_source\" : { \"excludes\": [ \"ignored\" ] } } }'\ncurl -XPUT localhost:9200/test/test/1 -d '{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }'\ncurl localhost:9200/test/test/1/_source\n```\n\nReturns `{\"not_empty\":{\"key\":\"value\"}}` which is not expected.\n", "comments": [], "number": 4047, "title": "Empty objects are not stored in _source when an include/exclude list is present" }
{ "body": "...clude list is present\n\nCloses #4047 \n\nThis was originally being tracked by pull request #4048. I closed that pull request because it was sloppy.\n\nI am uncertain about the behavior for handling includes/excludes with empty objects and wildcards, so I added tests to cover the following cases, but please let me know if you think the results are incorrect...\n\nGiven\n\n```\n{\n \"object1\": { },\n \"object2\": { }\n}\n```\n\nAnd a mapping\n\n```\n{\n ...\n {\n \"_source\" : {\n \"excludes\": \"object1.*,\n \"includes\": \"object2.*\"\n }\n }\n}\n```\n\nResults in a filtered source of\n\n```\n{\n \"object1\": { },\n}\n```\n\nHowever, if given\n\n```\n{\n \"object1\": {\n \"key1\": \"value1\"\n },\n \"object2\": {\n \"key2\": \"value2\"\n }\n}\n```\n\nand the same mapping, the filtered source would be\n\n```\n{\n \"object2\": {\n \"key2\": \"value2\"\n }\n}\n```\n", "number": 4080, "review_comments": [ { "body": "nice!\n", "created_at": "2013-11-25T14:50:41Z" }, { "body": "I think this is problematic with:\n\n```\n{\n \"obj\": { \"f\" : 1 } \n}\n```\n\nand `excludes = [ \"*.f\"]` . This wouldn't match the \"obj.\" path and all of obj would be copied instead of filtered?\n", "created_at": "2013-11-25T14:57:08Z" }, { "body": "I'm not sure about this test. I think it's confusing if \n\n```\n{\n \"obj\" : { \"f\": 1 }\n}\n```\n\nreturns `{}`\n\nbut \n\n```\n{\n \"obj\" : { }\n}\n```\n\nreturns `{ \"obj\": {}}`\n", "created_at": "2013-11-25T15:04:19Z" }, { "body": "Maybe add a test where there is another property on the obj1 level and that it is being kept?\n", "created_at": "2013-11-25T15:06:35Z" }, { "body": "I understand why that can be confusing, but\n\n```\n{\n \"obj\": { \"f\": 1, \"b\": 2 }\n}\n```\n\nshould return `{ \"obj\": { \"b\": 2 } }`.\n\nAt filter time, we don't know the possible fields in the object, so unless all are excluded (`obj.*`) or the filter rejects everything from within the object (`obj.f` on `{ \"obj\": { \"f\": 1 } }`) I think it makes sense to leave the object intact. I am open to discussing this further, I just think that it is better to leave an object behind rather than aggressively remove it.\n", "created_at": "2013-11-25T15:24:25Z" }, { "body": "Done.\n", "created_at": "2013-11-25T15:25:40Z" }, { "body": "Take a look at the new tests I just added.\n", "created_at": "2013-11-25T15:44:09Z" }, { "body": "Honestly, because we don't know what the possible keys are, I think `obj.*` and `obj` are the only two things that should be capable of removing an object.\n\nIf I have these objects\n\n```\n{\n \"obj\": { \"f1\": \"v1\"}\n}\n\n{\n \"obj\": { \"f2\": \"v2\" }\n}\n\n{\n \"obj\": { \"f1\": \"v1\", \"f2\": \"v2\" }\n}\n```\n\nAnd `excludes = [ \"obj.f1\"]` I am left with\n\n```\n{\n}\n\n{\n \"obj\": { \"f2\": \"v2\" }\n}\n\n{\n \"obj\": { \"f2\": \"v2\" }\n}\n```\n\nwhich could be very weird.\n\nThis use case makes more sense if we are talking about some nested object that is indexed...\n\nExample:\n\n```\n{\n \"name\": \"John Doe\",\n \"identifiers\": {\n \"ssn\": \"987-65-4320\",\n \"facebook_uid\": \"12345\"\n }\n```\n\nand `excludes = [ \"*.ssn\"]` would drop the entire `identifiers` object if the only key was `ssn` for that object, even if we want the empty identifiers object to remain under all circumstances.\n", "created_at": "2013-11-25T15:48:11Z" } ], "title": "Fixes #4047 - Empty objects are not stored in _source when an include/ex..." }
{ "commits": [ { "message": "Fixes #4047 - Empty objects are not stored in _source when an include/exclude list is present" }, { "message": "Merge remote-tracking branch 'upstream/master' into issue_4047" }, { "message": "Fixes #4047 - Empty objects are not stored in _source when an include/exclude list is present" }, { "message": "Additional tests for issue #4047" } ], "files": [ { "diff": "@@ -153,14 +153,7 @@ private static void filter(Map<String, Object> map, Map<String, Object> into, St\n }\n sb.append(key);\n String path = sb.toString();\n- boolean excluded = false;\n- for (String exclude : excludes) {\n- if (Regex.simpleMatch(exclude, path)) {\n- excluded = true;\n- break;\n- }\n- }\n- if (excluded) {\n+ if (Regex.simpleMatch(excludes, path)) {\n sb.setLength(mark);\n continue;\n }\n@@ -207,10 +200,14 @@ private static void filter(Map<String, Object> map, Map<String, Object> into, St\n \n if (entry.getValue() instanceof Map) {\n Map<String, Object> innerInto = Maps.newHashMap();\n- // if we had an exact match, we want give deeper excludes their chance\n- filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);\n- if (!innerInto.isEmpty()) {\n+ if (exactIncludeMatch && ((Map<String, Object>) entry.getValue()).isEmpty() && !Regex.simpleMatch(excludes, path + '.')) {\n into.put(entry.getKey(), innerInto);\n+ } else {\n+ // if we had an exact match, we want give deeper excludes their chance\n+ filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);\n+ if (!innerInto.isEmpty()) {\n+ into.put(entry.getKey(), innerInto);\n+ }\n }\n } else if (entry.getValue() instanceof List) {\n List<Object> list = (List<Object>) entry.getValue();", "filename": "src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java", "status": "modified" }, { "diff": "@@ -368,4 +368,263 @@ public void filterWithEmptyIncludesExcludes() {\n assertThat(filteredMap.get(\"field\").toString(), equalTo(\"value\"));\n \n }\n+\n+ @Test\n+ public void testThatFilterIncludesEmptyObjectWhenUsingExcludes() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"nonExistingField\"});\n+\n+ assertThat(mapTuple.v2(), equalTo(filteredSource));\n+ }\n+\n+ @Test\n+ public void testThatFilterOmitsEmptyObjectWithExcludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"obj.*\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesEmptyObjectWithExcludedProperty() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = 
XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"obj.f1\"});\n+\n+ assertThat(mapTuple.v2(), equalTo(filteredSource));\n+ }\n+\n+ @Test\n+ public void testThatFilterOmitsObjectsWithExcludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .field(\"f1\", \"v1\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"obj.f1\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesObjectsWithSomeExcludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .field(\"f1\", \"v1\")\n+ .field(\"f2\", \"v2\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"obj.f1\"});\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj\"));\n+ assertThat(((Map) filteredSource.get(\"obj\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"obj\")), hasKey(\"f2\"));\n+ }\n+\n+ @Test\n+ public void testThatFilterExcludesObjectWithWildcardPrefix() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .field(\"f1\", \"v1\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"*.f1\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterExcludesFieldsWithWildcardPrefix() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .field(\"f1\", \"v1\")\n+ .field(\"f2\", \"v2\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"*.f1\"});\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj\"));\n+ assertThat(((Map) filteredSource.get(\"obj\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"obj\")), hasKey(\"f2\"));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesEmptyObjectWhenUsingIncludes() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(mapTuple.v2(), equalTo(filteredSource));\n+ }\n+\n+ @Test\n+ public void testThatFilterOmitsEmptyObjectsWithoutIncludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ 
.endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj.*\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterOmitsObjectsWithoutIncludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .field(\"f1\", \"v1\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj.f2\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesObjectsWithSomeIncludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj\")\n+ .field(\"f1\", \"v1\")\n+ .field(\"f2\", \"v2\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj.f2\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj\"));\n+ assertThat(((Map) filteredSource.get(\"obj\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"obj\")), hasKey(\"f2\"));\n+ }\n+\n+ @Test\n+ public void testFilterWithIncludesAndExcludes() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .endObject()\n+ .startObject(\"obj2\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"obj1.*\"}, new String[]{\"obj2.*\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testFilterExcludesWithNestedObjects() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .startObject(\"obj2\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"*.obj2.*\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testFilterExcludesNestedObjectAndKeepsParent() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .field(\"f1\", \"v1\")\n+ .startObject(\"obj2\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"*.obj2.*\"});\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj1\"));\n+ assertThat(((Map) filteredSource.get(\"obj1\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"obj1\")), 
hasKey(\"f1\"));\n+ }\n+\n+ @Test\n+ public void testFilterOmitsObjectWithNestedExcludedObjects() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .startObject(\"obj2\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"*.obj2\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testFilterOmitsObjectWithNestedExcludedObject() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .startObject(\"obj2\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"*.obj2\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testFilterIncludesObjectWithNestedIncludedObject() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"obj1\")\n+ .startObject(\"obj2\")\n+ .endObject()\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"*.obj2\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"obj1\"));\n+ assertThat(((Map) filteredSource.get(\"obj1\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"obj1\")), hasKey(\"obj2\"));\n+ assertThat(((Map) ((Map) filteredSource.get(\"obj1\")).get(\"obj2\")).size(), equalTo(0));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java", "status": "modified" } ] }
{ "body": "```\ncurl -XDELETE localhost:9200/test\ncurl -XPUT localhost:9200/test/test/1 -d '{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }'\ncurl localhost:9200/test/test/1/_source\n```\n\nReturns `{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }` as expected.\n\n```\ncurl -XDELETE localhost:9200/test\ncurl -XPUT localhost:9200/test\ncurl -XPUT localhost:9200/test/test/_mapping -d '{ \"test\": { \"_source\" : { \"excludes\": [ \"ignored\" ] } } }'\ncurl -XPUT localhost:9200/test/test/1 -d '{ \"empty\": {}, \"not_empty\": { \"key\": \"value\" } }'\ncurl localhost:9200/test/test/1/_source\n```\n\nReturns `{\"not_empty\":{\"key\":\"value\"}}` which is not expected.\n", "comments": [], "number": 4047, "title": "Empty objects are not stored in _source when an include/exclude list is present" }
{ "body": "...clude list is present\n\nThis fixes an issue where an empty object would not be stored in the _source field. Fields whose values become empty as the result of filtering will continue to be removed from _source.\n\nCloses #4047\n", "number": 4048, "review_comments": [ { "body": "The innerInto may have been empty but we still don't want to return it (see comment on the discussion page).\n\nPerhaps change line 208 to only try to filter if the object is not empty?\n", "created_at": "2013-11-04T10:34:05Z" } ], "title": "Fixed bug where empty objects are not stored in _source if an include/ex..." }
{ "commits": [ { "message": "Fixed bug where empty objects are not stored in _source if an include/exclude list is present" }, { "message": "Fixed bug where empty objects are not stored in _source if an include/exclude list is present." }, { "message": "Merge remote-tracking branch 'upstream/master'" }, { "message": "Fixed bug where empty objects are not stored in _source if an include/exclude list is present." } ], "files": [ { "diff": "@@ -205,10 +205,10 @@ private static void filter(Map<String, Object> map, Map<String, Object> into, St\n }\n \n \n- if (entry.getValue() instanceof Map) {\n+ if (entry.getValue() instanceof Map && !((Map<String, Object>) entry.getValue()).isEmpty()) {\n Map<String, Object> innerInto = Maps.newHashMap();\n // if we had an exact match, we want give deeper excludes their chance\n- filter((Map<String, Object>) entry.getValue(), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);\n+ filter(((Map<String, Object>) entry.getValue()), innerInto, exactIncludeMatch ? Strings.EMPTY_ARRAY : includes, excludes, sb);\n if (!innerInto.isEmpty()) {\n into.put(entry.getKey(), innerInto);\n }", "filename": "src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java", "status": "modified" }, { "diff": "@@ -368,4 +368,122 @@ public void filterWithEmptyIncludesExcludes() {\n assertThat(filteredMap.get(\"field\").toString(), equalTo(\"value\"));\n \n }\n+\n+ @Test\n+ public void testThatFilteringWithEmptyObjectAndExclusionWorks() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"emptyObject\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"nonExistingField\"});\n+\n+ assertThat(mapTuple.v2(), equalTo(filteredSource));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesEmptyObjectsWithoutExcludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"someObject\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"someObject.*\"});\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"someObject\"));\n+ assertThat(((Map) filteredSource.get(\"someObject\")).size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterOmitsObjectsWithExcludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"someObject\")\n+ .field(\"someField\", \"someValue\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"someObject.someField\"});\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesObjectsWithSomeExcludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"someObject\")\n+ .field(\"someField\", \"someValue\")\n+ .field(\"someOtherField\", \"someOtherValue\")\n+ 
.endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{\"someObject.someField\"});\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"someObject\"));\n+ assertThat(((Map) filteredSource.get(\"someObject\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"someObject\")), hasKey(\"someOtherField\"));\n+ }\n+\n+ @Test\n+ public void testThatFilteringWithEmptyObjectAndInclusionWorks() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"emptyObject\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"emptyObject\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(mapTuple.v2(), equalTo(filteredSource));\n+ }\n+\n+ @Test\n+ public void testThatFilterOmitsEmptyObjectsWithoutIncludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"someObject\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"someObject.*\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterOmitsObjectsWithoutIncludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"someObject\")\n+ .field(\"someField\", \"someValue\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"someObject.someOtherField\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(0));\n+ }\n+\n+ @Test\n+ public void testThatFilterIncludesObjectsWithSomeIncludedProperties() throws Exception {\n+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"someObject\")\n+ .field(\"someField\", \"someValue\")\n+ .field(\"someOtherField\", \"someOtherValue\")\n+ .endObject()\n+ .endObject();\n+\n+ Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);\n+ Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{\"someObject.someOtherField\"}, Strings.EMPTY_ARRAY);\n+\n+ assertThat(filteredSource.size(), equalTo(1));\n+ assertThat(filteredSource, hasKey(\"someObject\"));\n+ assertThat(((Map) filteredSource.get(\"someObject\")).size(), equalTo(1));\n+ assertThat(((Map<String, Object>) filteredSource.get(\"someObject\")), hasKey(\"someOtherField\"));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java", "status": "modified" } ] }
{ "body": "Although it might not make much sense to percolate a document containing a completion field, that's what you end up doing if you percolate while indexing, and your mapping contains a completion field. When adding the completion field to the memory index a NullPointerException is thrown. \n\nThis happens with the 0.90 branch. Happens also with master, at least percolating an existing document.\n\nHere is the recreation:\n\n```\ncurl -XPUT localhost:9200/hotels -d '\n{\n \"mappings\": {\n \"hotel\" : {\n \"properties\" : {\n \"name\" : { \"type\" : \"string\" },\n \"city\" : { \"type\" : \"string\" },\n \"name_suggest\" : {\n \"type\" : \"completion\"\n }\n }\n }\n }\n}'\n\ncurl -XGET localhost:9200/hotels/hotel/_percolate -d '{\n \"doc\" : {\n \"name\" : \"Mercure Hotel Munich\",\n \"city\" : \"Munich\",\n \"name_suggest\" : \"Mercure Hotel Munich\"\n }\n}\n'\n```\n\nHere is the stacktrace:\n\n```\njava.lang.RuntimeException: java.lang.NullPointerException\n at org.apache.lucene.index.memory.MemoryIndex.addField(MemoryIndex.java:463)\n at org.apache.lucene.index.memory.MemoryIndex.addField(MemoryIndex.java:370)\n at org.elasticsearch.index.percolator.PercolatorExecutor.percolate(PercolatorExecutor.java:450)\n at org.elasticsearch.index.percolator.PercolatorExecutor.percolate(PercolatorExecutor.java:422)\n at org.elasticsearch.index.percolator.PercolatorService.percolate(PercolatorService.java:111)\n at org.elasticsearch.action.percolate.TransportPercolateAction.shardOperation(TransportPercolateAction.java:93)\n at org.elasticsearch.action.percolate.TransportPercolateAction.shardOperation(TransportPercolateAction.java:41)\n at org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction$AsyncSingleAction$2.run(TransportSingleCustomOperationAction.java:175)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:724)\nCaused by: java.lang.NullPointerException\n at org.apache.lucene.util.BytesRefHash.add(BytesRefHash.java:274)\n at org.apache.lucene.index.memory.MemoryIndex.addField(MemoryIndex.java:437)\n ... 10 more\n```\n", "comments": [], "number": 4028, "title": "NPE when percolating a document that contains a completion field" }
{ "body": "The CompletionTokenStream doesn't properly forward the call to its attributes, so when the percolator needs to access terms of this stream, null was returned and this isn't expected in the MemoryIndex.\n\nRelates to #4028\n", "number": 4040, "review_comments": [], "title": "Fixed bug that document with a suggest field can't be percolated." }
{ "commits": [ { "message": "Fixed bug that document with a suggest field can't be percolated.\n\nThe CompletionTokenStream doesn't properly forward the call to its attributes, so when the percolator needs to access terms of this stream, null was returned and this isn't expected in the MemoryIndex.\n\nCloses #4028" }, { "message": "Added tests" } ], "files": [ { "diff": "@@ -36,7 +36,7 @@\n */\n public final class CompletionTokenStream extends TokenStream {\n \n- private final PayloadAttribute payloadAttr = addAttribute(PayloadAttribute.class);;\n+ private final PayloadAttribute payloadAttr = addAttribute(PayloadAttribute.class);\n private final PositionIncrementAttribute posAttr = addAttribute(PositionIncrementAttribute.class);\n private final ByteTermAttribute bytesAtt = addAttribute(ByteTermAttribute.class);\n \n@@ -49,6 +49,7 @@ public final class CompletionTokenStream extends TokenStream {\n private final BytesRef scratch = new BytesRef();\n \n public CompletionTokenStream(TokenStream input, BytesRef payload, ToFiniteStrings toFiniteStrings) throws IOException {\n+ super(input);\n this.input = input;\n this.payload = payload;\n this.toFiniteStrings = toFiniteStrings;\n@@ -95,6 +96,7 @@ public void end() throws IOException {\n \n @Override\n public void close() throws IOException {\n+ super.close();\n if (posInc == -1) {\n input.close();\n }\n@@ -115,6 +117,7 @@ public interface ByteTermAttribute extends TermToBytesRefAttribute {\n public void setBytesRef(BytesRef bytes);\n }\n \n+ // Unused class, should this be removed?\n public static final class ByteTermAttributeImpl extends AttributeImpl implements ByteTermAttribute, TermToBytesRefAttribute {\n private BytesRef bytes;\n ", "filename": "src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java", "status": "modified" }, { "diff": "@@ -28,14 +28,17 @@\n import org.elasticsearch.action.admin.indices.segments.ShardSegments;\n import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.action.percolate.PercolateResponse;\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.suggest.SuggestResponse;\n+import org.elasticsearch.client.Requests;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.mapper.MapperException;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.core.CompletionFieldMapper;\n+import org.elasticsearch.percolator.PercolatorService;\n import org.elasticsearch.search.sort.FieldSortBuilder;\n import org.elasticsearch.search.suggest.completion.CompletionStats;\n import org.elasticsearch.search.suggest.completion.CompletionSuggestion;\n@@ -56,6 +59,7 @@\n import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;\n import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n import static org.hamcrest.Matchers.*;\n \n@@ -89,6 +93,36 @@ public void testSimple() throws Exception {\n assertSuggestionsNotInOrder(\"t\", \"The Prodigy\", \"Turbonegro\", 
\"Turbonegro Get it on\", \"The Prodigy Firestarter\");\n }\n \n+ @Test\n+ public void testSuggestFieldWithPercolateApi() throws Exception {\n+ createIndexAndMapping();\n+ String[][] input = {{\"Foo Fighters\"}, {\"Foo Fighters\"}, {\"Foo Fighters\"}, {\"Foo Fighters\"},\n+ {\"Generator\", \"Foo Fighters Generator\"}, {\"Learn to Fly\", \"Foo Fighters Learn to Fly\"},\n+ {\"The Prodigy\"}, {\"The Prodigy\"}, {\"The Prodigy\"}, {\"Firestarter\", \"The Prodigy Firestarter\"},\n+ {\"Turbonegro\"}, {\"Turbonegro\"}, {\"Get it on\", \"Turbonegro Get it on\"}}; // work with frequencies\n+ for (int i = 0; i < input.length; i++) {\n+ client().prepareIndex(INDEX, TYPE, \"\" + i)\n+ .setSource(jsonBuilder()\n+ .startObject().startObject(FIELD)\n+ .startArray(\"input\").value(input[i]).endArray()\n+ .endObject()\n+ .endObject()\n+ )\n+ .execute().actionGet();\n+ }\n+\n+ client().prepareIndex(INDEX, PercolatorService.TYPE_NAME, \"4\")\n+ .setSource(jsonBuilder().startObject().field(\"query\", matchAllQuery()).endObject())\n+ .execute().actionGet();\n+\n+ refresh();\n+\n+ PercolateResponse response = client().preparePercolate().setIndices(INDEX).setDocumentType(TYPE)\n+ .setGetRequest(Requests.getRequest(INDEX).type(TYPE).id(\"1\"))\n+ .execute().actionGet();\n+ assertThat(response.getCount(), equalTo(1l));\n+ }\n+\n @Test\n public void testBasicPrefixSuggestion() throws Exception {\n createIndexAndMapping();", "filename": "src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java", "status": "modified" }, { "diff": "@@ -25,10 +25,7 @@\n import org.apache.lucene.analysis.synonym.SynonymFilter;\n import org.apache.lucene.analysis.synonym.SynonymMap;\n import org.apache.lucene.analysis.synonym.SynonymMap.Builder;\n-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;\n-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;\n-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;\n-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;\n+import org.apache.lucene.analysis.tokenattributes.*;\n import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;\n import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.CharsRef;\n@@ -42,6 +39,8 @@\n import java.io.StringReader;\n import java.util.Set;\n \n+import static org.hamcrest.Matchers.equalTo;\n+\n public class CompletionTokenStreamTest extends ElasticsearchTokenStreamTestCase {\n \n final XAnalyzingSuggester suggester = new XAnalyzingSuggester(new SimpleAnalyzer(TEST_VERSION_CURRENT));\n@@ -115,6 +114,31 @@ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {\n assertEquals(count, maxPos);\n \n }\n+\n+ @Test\n+ public void testSuggestTokenFilterProperlyDelegateInputStream() throws Exception {\n+ TokenStream tokenStream = new MockTokenizer(new StringReader(\"mykeyword\"), MockTokenizer.WHITESPACE, true);\n+ BytesRef payload = new BytesRef(\"Surface keyword|friggin payload|10\");\n+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {\n+ @Override\n+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {\n+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);\n+ }\n+ }));\n+ TermToBytesRefAttribute termAtt = suggestTokenStream.getAttribute(TermToBytesRefAttribute.class);\n+ BytesRef ref = termAtt.getBytesRef();\n+ assertNotNull(ref);\n+ suggestTokenStream.reset();\n+\n+ 
while (suggestTokenStream.incrementToken()) {\n+ termAtt.fillBytesRef();\n+ assertThat(ref.utf8ToString(), equalTo(\"mykeyword\"));\n+ }\n+ suggestTokenStream.end();\n+ suggestTokenStream.close();\n+ }\n+\n+\n \n @Test(expected = IllegalArgumentException.class)\n public void testInValidNumberOfExpansions() throws IOException {", "filename": "src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java", "status": "modified" } ] }
{ "body": "We created an index and tried to \"curl -PUT\" a document, failed after a timeout with:\n\n```\n{\n \"error\": \"NoShardAvailableActionException[[test][0] No shard available for [[test][test_AS24Elasticsearch][1]: routing [null]]]\",\n \"status\": 500\n}\n```\n\nThe reason was this line in the elasticsearch.yml:\n\n```\nindex.search.slowlog.threshold.query.warn: s\n```\n\nWhen reproducing this i found that the elasticsearch.yml has to be broken at startup of the elasticsearch service. \n\nIt produced about 1'000'000 lines in the logfile.\n\nI post this hoping for a more precise error message, an earlier parsing of the elasticsearch.yml or that people running into the same problem will find this post and identify their problem faster.\n\nThe wrong stanza in elasticsearch.yml:\n\n```\nindex.search.slowlog.level: TRACE\nindex.search.slowlog.threshold.query.warn: s\nindex.search.slowlog.threshold.query.info: s\nindex.search.slowlog.threshold.query.debug: s\nindex.search.slowlog.threshold.query.trace: ms\n```\n\n(This kind of mistake is, as you will guess, the result of a beginner meddling with Puppet)\n\nIn the logfile - only the first 82 lines:\n\n```\n[2013-03-05 09:35:10,805][WARN ][indices.cluster ] [dexxxv001] [sunytest][0] failed to create shard\norg.elasticsearch.index.shard.IndexShardCreationException: [sunytest][0] failed to create shard\n at org.elasticsearch.index.service.InternalIndexService.createShard(InternalIndexService.java:323)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyInitializingShard(IndicesClusterStateService.java:561)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.applyNewOrUpdatedShards(IndicesClusterStateService.java:526)\n at org.elasticsearch.indices.cluster.IndicesClusterStateService.clusterChanged(IndicesClusterStateService.java:171)\n at org.elasticsearch.cluster.service.InternalClusterService$2.run(InternalClusterService.java:315)\n at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)\n at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)\n at java.lang.Thread.run(Thread.java:722)\nCaused by: org.elasticsearch.ElasticSearchParseException: Failed to parse [s]\n at org.elasticsearch.common.unit.TimeValue.parseTimeValue(TimeValue.java:253)\n at org.elasticsearch.common.settings.ImmutableSettings.getAsTime(ImmutableSettings.java:191)\n at org.elasticsearch.index.search.slowlog.ShardSlowLogSearchService.<init>(ShardSlowLogSearchService.java:132)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)\n at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)\n at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)\n at java.lang.reflect.Constructor.newInstance(Constructor.java:525)\n at org.elasticsearch.common.inject.DefaultConstructionProxyFactory$1.newInstance(DefaultConstructionProxyFactory.java:54)\n at org.elasticsearch.common.inject.ConstructorInjector.construct(ConstructorInjector.java:86)\n at org.elasticsearch.common.inject.ConstructorBindingImpl$Factory.get(ConstructorBindingImpl.java:98)\n at org.elasticsearch.common.inject.ProviderToInternalFactoryAdapter$1.call(ProviderToInternalFactoryAdapter.java:45)\n at org.elasticsearch.common.inject.InjectorImpl.callInContext(InjectorImpl.java:819)\n at org.elasticsearch.common.inject.ProviderToInternalFactoryAdapter.get(ProviderToInternalFactoryAdapter.java:42)\n at 
org.elasticsearch.common.inject.Scopes$1$1.get(Scopes.java:56)\n at org.elasticsearch.common.inject.InternalFactoryToProviderAdapter.get(InternalFactoryToProviderAdapter.java:45)\n at org.elasticsearch.common.inject.SingleParameterInjector.inject(SingleParameterInjector.java:42)\n at org.elasticsearch.common.inject.SingleParameterInjector.getAll(SingleParameterInjector.java:66)\n at org.elasticsearch.common.inject.ConstructorInjector.construct(ConstructorInjector.java:85)\n at org.elasticsearch.common.inject.ConstructorBindingImpl$Factory.get(ConstructorBindingImpl.java:98)\n at org.elasticsearch.common.inject.ProviderToInternalFactoryAdapter$1.call(ProviderToInternalFactoryAdapter.java:45)\n at org.elasticsearch.common.inject.InjectorImpl.callInContext(InjectorImpl.java:819)\n at org.elasticsearch.common.inject.ProviderToInternalFactoryAdapter.get(ProviderToInternalFactoryAdapter.java:42)\n at org.elasticsearch.common.inject.Scopes$1$1.get(Scopes.java:56)\n at org.elasticsearch.common.inject.InternalFactoryToProviderAdapter.get(InternalFactoryToProviderAdapter.java:45)\n at org.elasticsearch.common.inject.SingleParameterInjector.inject(SingleParameterInjector.java:42)\n at org.elasticsearch.common.inject.SingleParameterInjector.getAll(SingleParameterInjector.java:66)\n at org.elasticsearch.common.inject.ConstructorInjector.construct(ConstructorInjector.java:85)\n at org.elasticsearch.common.inject.ConstructorBindingImpl$Factory.get(ConstructorBindingImpl.java:98)\n at org.elasticsearch.common.inject.FactoryProxy.get(FactoryProxy.java:52)\n at org.elasticsearch.common.inject.ProviderToInternalFactoryAdapter$1.call(ProviderToInternalFactoryAdapter.java:45)\n at org.elasticsearch.common.inject.InjectorImpl.callInContext(InjectorImpl.java:819)\n at org.elasticsearch.common.inject.ProviderToInternalFactoryAdapter.get(ProviderToInternalFactoryAdapter.java:42)\n at org.elasticsearch.common.inject.Scopes$1$1.get(Scopes.java:56)\n at org.elasticsearch.common.inject.InternalFactoryToProviderAdapter.get(InternalFactoryToProviderAdapter.java:45)\n at org.elasticsearch.common.inject.InjectorBuilder$1.call(InjectorBuilder.java:200)\n at org.elasticsearch.common.inject.InjectorBuilder$1.call(InjectorBuilder.java:193)\n at org.elasticsearch.common.inject.InjectorImpl.callInContext(InjectorImpl.java:812)\n at org.elasticsearch.common.inject.InjectorBuilder.loadEagerSingletons(InjectorBuilder.java:193)\n at org.elasticsearch.common.inject.InjectorBuilder.injectDynamically(InjectorBuilder.java:175)\n at org.elasticsearch.common.inject.InjectorBuilder.build(InjectorBuilder.java:110)\n at org.elasticsearch.common.inject.InjectorImpl.createChildInjector(InjectorImpl.java:129)\n at org.elasticsearch.common.inject.ModulesBuilder.createChildInjector(ModulesBuilder.java:66)\n at org.elasticsearch.index.service.InternalIndexService.createShard(InternalIndexService.java:321)\n ... 7 more\nCaused by: java.lang.NumberFormatException: empty String\n at sun.misc.FloatingDecimal.readJavaFormatString(FloatingDecimal.java:1011)\n at java.lang.Double.parseDouble(Double.java:540)\n```\n", "comments": [ { "body": "We don't validate it since its used during actual shard creation. When will happen is that an index will be created, but when the shard is actually allocated, it will fail to be allocated (so the index is in red state).\n", "created_at": "2013-03-05T19:47:06Z" }, { "body": "reverted, as we do not want to create an exception to the rule for parsing time based settings. 
need to come up with something more consistent here\n", "created_at": "2013-11-19T15:17:23Z" }, { "body": "Possibly this can be handled by the proper settings framework planned in #6732?\n", "created_at": "2014-11-29T14:12:15Z" }, { "body": "Closing as a duplicate of #2997\n", "created_at": "2015-09-19T17:36:36Z" } ], "number": 2730, "title": "NoShardAvailableActionException caused by missing value in elasticsearch.yml" }
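A minimal, hedged reproduction (stand-alone sketch, not from the report; the class name `SlowlogSettingRepro` is made up) of the parse failure that floods the log: a value like `"s"` carries a unit but no number, so the `getAsTime(...)` call used by the slowlog service throws instead of falling back to its default.

```java
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class SlowlogSettingRepro {
    public static void main(String[] args) {
        Settings settings = ImmutableSettings.builder()
                .put("index.search.slowlog.threshold.query.warn", "s") // unit only, no number
                .build();
        try {
            // Delegates to TimeValue parsing, which is left with an empty numeric
            // part and throws "Failed to parse [s]" instead of using the default.
            settings.getAsTime("index.search.slowlog.threshold.query.warn",
                    TimeValue.timeValueNanos(-1));
        } catch (Exception e) {
            System.out.println("failed: " + e.getMessage());
        }
    }
}
```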
{ "body": "In case of a misconfigured slow search/index configuration (unparseable\nTimeValue) an exception is thrown.\n\nThis is not a problem when creating a shard of an index, as an exception\nis returned and all is good. However, this is a huge problem, when\nstarting up a node, as the shard creation is repeated endlessly.\n\nThis patch changes the behaviour to go on as usual and just disable the\nslowlog, as an improper configuration of logging should not affect the\nallocation behaviour.\n\nCloses #2730\n", "number": 4018, "review_comments": [], "title": "Ignore slow log configuration on shard creation" }
{ "commits": [ { "message": "Ignore slow log configuration on shard creation\n\nIn case of a misconfigured slow search/index configuration (unparseable\nTimeValue) an exception is thrown.\n\nThis is not a problem when creating a shard of an index, as an exception\nis returned and all is good. However, this is a huge problem, when\nstarting up a node, as the shard creation is repeated endlessly.\n\nThis patch changes the behaviour to go on as usual and just disable the\nslowlog, as an improper configuration of logging should not affect the\nallocation behaviour.\n\nCloses #2730" } ], "files": [ { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.index.search.slowlog;\n \n+import org.elasticsearch.ElasticSearchParseException;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.logging.ESLogger;\n@@ -125,15 +126,15 @@ public ShardSlowLogSearchService(ShardId shardId, @IndexSettings Settings indexS\n \n this.reformat = componentSettings.getAsBoolean(\"reformat\", true);\n \n- this.queryWarnThreshold = componentSettings.getAsTime(\"threshold.query.warn\", TimeValue.timeValueNanos(-1)).nanos();\n- this.queryInfoThreshold = componentSettings.getAsTime(\"threshold.query.info\", TimeValue.timeValueNanos(-1)).nanos();\n- this.queryDebugThreshold = componentSettings.getAsTime(\"threshold.query.debug\", TimeValue.timeValueNanos(-1)).nanos();\n- this.queryTraceThreshold = componentSettings.getAsTime(\"threshold.query.trace\", TimeValue.timeValueNanos(-1)).nanos();\n+ this.queryWarnThreshold = parseTimeSetting(\"threshold.query.warn\", -1);\n+ this.queryInfoThreshold = parseTimeSetting(\"threshold.query.info\", -1);\n+ this.queryDebugThreshold = parseTimeSetting(\"threshold.query.debug\", -1);\n+ this.queryTraceThreshold = parseTimeSetting(\"threshold.query.trace\", -1);\n \n- this.fetchWarnThreshold = componentSettings.getAsTime(\"threshold.fetch.warn\", TimeValue.timeValueNanos(-1)).nanos();\n- this.fetchInfoThreshold = componentSettings.getAsTime(\"threshold.fetch.info\", TimeValue.timeValueNanos(-1)).nanos();\n- this.fetchDebugThreshold = componentSettings.getAsTime(\"threshold.fetch.debug\", TimeValue.timeValueNanos(-1)).nanos();\n- this.fetchTraceThreshold = componentSettings.getAsTime(\"threshold.fetch.trace\", TimeValue.timeValueNanos(-1)).nanos();\n+ this.fetchWarnThreshold = parseTimeSetting(\"threshold.fetch.warn\", -1);\n+ this.fetchInfoThreshold = parseTimeSetting(\"threshold.fetch.info\", -1);\n+ this.fetchDebugThreshold = parseTimeSetting(\"threshold.fetch.debug\", -1);\n+ this.fetchTraceThreshold = parseTimeSetting(\"threshold.fetch.trace\", -1);\n \n this.level = componentSettings.get(\"level\", \"TRACE\").toUpperCase(Locale.ROOT);\n \n@@ -146,6 +147,15 @@ public ShardSlowLogSearchService(ShardId shardId, @IndexSettings Settings indexS\n indexSettingsService.addListener(new ApplySettings());\n }\n \n+ private long parseTimeSetting(String name, long defaultNanos) {\n+ try {\n+ return componentSettings.getAsTime(name, TimeValue.timeValueNanos(defaultNanos)).nanos();\n+ } catch (ElasticSearchParseException e) {\n+ logger.error(\"Could not parse setting for [{}], disabling\", name);\n+ return -1;\n+ }\n+ }\n+\n public void onQueryPhase(SearchContext context, long tookInNanos) {\n if (queryWarnThreshold >= 0 && tookInNanos > queryWarnThreshold) {\n queryLogger.warn(\"{}\", new SlowLogSearchContextPrinter(context, tookInNanos, reformat));", "filename": 
"src/main/java/org/elasticsearch/index/search/slowlog/ShardSlowLogSearchService.java", "status": "modified" }, { "diff": "@@ -0,0 +1,70 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.elasticsearch.index.search.slowlog;\n+\n+import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.common.settings.Settings;\n+import org.elasticsearch.index.Index;\n+import org.elasticsearch.index.settings.IndexSettingsService;\n+import org.elasticsearch.index.shard.ShardId;\n+import org.elasticsearch.test.ElasticsearchTestCase;\n+import org.junit.Test;\n+\n+import static org.hamcrest.Matchers.is;\n+\n+/**\n+ *\n+ */\n+public class ShardSlowLogSearchServiceTests extends ElasticsearchTestCase {\n+\n+ private Index index = new Index(\"test\");\n+ private ShardId shardId = new ShardId(index, 0);\n+\n+\n+ @Test\n+ public void creatingShardSlowLogSearchServiceWithBrokenSettingsShouldWork() throws Exception {\n+ Settings brokenIndexSettings = ImmutableSettings.builder()\n+ .put(\"index.search.slowlog.threshold.query.warn\", \"s\")\n+ .build();\n+\n+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), brokenIndexSettings);\n+ new ShardSlowLogSearchService(shardId, brokenIndexSettings, indexSettingsService);\n+ }\n+\n+ @Test\n+ public void updatingViaListenerWithBrokenSettingsLeavesSettingsAsIs() throws Exception {\n+ Settings indexSettings = ImmutableSettings.builder()\n+ .put(\"index.search.slowlog.threshold.query.warn\", \"1s\")\n+ .build();\n+\n+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), indexSettings);\n+ ShardSlowLogSearchService shardSlowLogSearchService = new ShardSlowLogSearchService(shardId, indexSettings, indexSettingsService);\n+\n+ Settings updatedSettings = ImmutableSettings.builder()\n+ .put(\"index.search.slowlog.threshold.query.warn\", \"s\")\n+ .build();\n+ indexSettingsService.refreshSettings(updatedSettings);\n+\n+ // this is still the time from the indexSettings above, but was not overriden from the settings update\n+ // this basically ensures that the parsing exception was caught in the refreshSettings() methods\n+ String configuredTime = shardSlowLogSearchService.indexSettings().get(\"index.search.slowlog.threshold.query.warn\");\n+ assertThat(configuredTime, is(\"1s\"));\n+ }\n+\n+}", "filename": "src/test/java/org/elasticsearch/index/search/slowlog/ShardSlowLogSearchServiceTests.java", "status": "added" } ] }
{ "body": "When a node completes recovering a shard, it changes the shard status to `POST_RECOVERY` and sends a shard started message to the master. The master processes it and send a new cluster state which cause the shard to be moved `STARTED`.\n\nIf the master dies before processing that message, the message needs to be resent to the new master. We already have a mechanism in place for that but it needs to be extended to cover the `POST_RECOVERY` state.\n", "comments": [], "number": 4009, "title": "Not resending shard started messages when shard state is POST_RECOVERY and master died before processing the previous one" }
{ "body": "Closes #4009\n", "number": 4011, "review_comments": [], "title": "Also resend shard started message if shard state is in POST_RECOVERY and master thinks it's initializing" }
{ "commits": [ { "message": "Also resend shard started message if shard state is in POST_RECOVERY and master thinks it's initializing.\n\nCloses #4009" } ], "files": [ { "diff": "@@ -145,8 +145,9 @@ protected void doClose() throws ElasticSearchException {\n \n @Override\n public void clusterChanged(final ClusterChangedEvent event) {\n- if (!indicesService.changesAllowed())\n+ if (!indicesService.changesAllowed()) {\n return;\n+ }\n \n if (!lifecycle.started()) {\n return;\n@@ -505,8 +506,9 @@ private void processAliases(String index, Collection<AliasMetaData> aliases, Ind\n }\n \n private void applyNewOrUpdatedShards(final ClusterChangedEvent event) throws ElasticSearchException {\n- if (!indicesService.changesAllowed())\n+ if (!indicesService.changesAllowed()) {\n return;\n+ }\n \n RoutingTable routingTable = event.state().routingTable();\n RoutingNode routingNodes = event.state().readOnlyRoutingNodes().nodesToShards().get(event.state().nodes().localNodeId());\n@@ -618,14 +620,15 @@ private void applyInitializingShard(final RoutingTable routingTable, final Disco\n \n if (indexService.hasShard(shardId)) {\n IndexShard indexShard = indexService.shardSafe(shardId);\n- if (indexShard.state() == IndexShardState.STARTED) {\n- // the master thinks we are initializing, but we are already started\n- // (either master failover, or a cluster event before we managed to tell the master we started), mark us as started\n+ if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) {\n+ // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting\n+ // for master to confirm a shard started message (either master failover, or a cluster event before\n+ // we managed to tell the master we started), mark us as started\n if (logger.isTraceEnabled()) {\n- logger.trace(\"[{}][{}] master [{}] marked shard as initializing, but shard already created, mark shard as started\");\n+ logger.trace(\"[{}][{}] master [{}] marked shard as initializing, but shard has state [{}], mark shard as started\", indexShard.state());\n }\n shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(),\n- \"master \" + nodes.masterNode() + \" marked shard as initializing, but shard already started, mark shard as started\");\n+ \"master \" + nodes.masterNode() + \" marked shard as initializing, but shard state is [\" + indexShard.state() + \"], mark shard as started\");\n return;\n } else {\n if (indexShard.ignoreRecoveryAttempt()) {", "filename": "src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java", "status": "modified" } ] }
{ "body": "To reproduce, run:\n\n```\n curl -XPOST \"http://localhost:9200/_mtermvectors\" -d'\n {\n \"docs\": [\n {\n \"_index\": \"indexX\",\n \"_type\": \"testX\",\n \"_id\": \"1\"\n }]\n }'\n```\n\n(given you do not have an index called \"indexX\")\n", "comments": [], "number": 3989, "title": "Multi term vector request never returns if the shards for all requested documents are non existent" }
{ "body": "Multi term vector request never returned if shards for all requested documents\nwere non existent.\n\ncloses #3989\n", "number": 3990, "review_comments": [], "title": "Fix issue 3989" }
{ "commits": [ { "message": "Fix issue 3989\n\nMulti term vector request never returned if shards for all requested documents\nwere non existent.\n\ncloses #3989" } ], "files": [ { "diff": "@@ -85,9 +85,13 @@ protected void doExecute(final MultiTermVectorsRequest request, final ActionList\n }\n shardRequest.add(i, termVectorRequest);\n }\n-\n+ \n+ if (shardRequests.size() == 0) {\n+ // only failures..\n+ listener.onResponse(new MultiTermVectorsResponse(responses.toArray(new MultiTermVectorsItemResponse[responses.length()])));\n+ }\n+ \n final AtomicInteger counter = new AtomicInteger(shardRequests.size());\n-\n for (final MultiTermVectorsShardRequest shardRequest : shardRequests.values()) {\n shardAction.execute(shardRequest, new ActionListener<MultiTermVectorsShardResponse>() {\n @Override", "filename": "src/main/java/org/elasticsearch/action/termvector/TransportMultiTermVectorsAction.java", "status": "modified" }, { "diff": "@@ -23,8 +23,11 @@\n import org.apache.lucene.index.Fields;\n import org.elasticsearch.action.termvector.MultiTermVectorsItemResponse;\n import org.elasticsearch.action.termvector.MultiTermVectorsRequestBuilder;\n+import org.elasticsearch.action.termvector.MultiTermVectorsResponse;\n+import org.elasticsearch.action.termvector.TermVectorRequestBuilder;\n import org.junit.Test;\n \n+import static org.hamcrest.Matchers.equalTo;\n public class MultiTermVectorsTests extends AbstractTermVectorTests {\n \n @Test\n@@ -61,4 +64,12 @@ public void testDuelESLucene() throws Exception {\n }\n \n }\n+ public void testMissingIndexThrowsMissingIndex() throws Exception {\n+ TermVectorRequestBuilder requestBuilder = client().prepareTermVector(\"testX\", \"typeX\", Integer.toString(1));\n+ MultiTermVectorsRequestBuilder mtvBuilder = new MultiTermVectorsRequestBuilder(client());\n+ mtvBuilder.add(requestBuilder.request());\n+ MultiTermVectorsResponse response = mtvBuilder.execute().actionGet();\n+ assertThat(response.getResponses().length, equalTo(1));\n+ assertThat(response.getResponses()[0].getFailure().getMessage(), equalTo(\"[\" + response.getResponses()[0].getIndex() + \"] missing\"));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/termvectors/MultiTermVectorsTests.java", "status": "modified" } ] }
{ "body": "If a cluster has three indices `foo`, `bar` and `baz`, the index list `b*,+foo` in search request is resolved into all three indices, while `+foo,b*` throws `IndexMissingException[[+foo] missing]`\n\nRepro:\n\n```\ncurl -XPUT \"http://localhost:9200/foo/doc/1?pretty\" -d '{\"f\": \"v\"}'\ncurl -XPUT \"http://localhost:9200/bar/doc/1?pretty\" -d '{\"f\": \"v\"}'\ncurl -XPUT \"http://localhost:9200/baz/doc/1?pretty\" -d '{\"f\": \"v\"}'\ncurl -XPOST \"http://localhost:9200/_refresh?pretty\"\necho \"Searching indices b*,+foo - works\"\ncurl \"http://localhost:9200/b*,%2bfoo/_search?pretty\"\necho \"Searching indices +foo,b* - doesn't work\"\ncurl \"http://localhost:9200/%2bfoo,b*/_search?pretty\"\n```\n", "comments": [], "number": 3979, "title": "The +index pattern without a wildcard in the index list is handled inconsistently " }
{ "body": "...sistently\n\nFixes #3979\n", "number": 3980, "review_comments": [], "title": "The +index pattern without a wildcard in the index list is handled incon..." }
{ "commits": [ { "message": "The +index pattern without a wildcard in the index list is handled inconsistently\n\nFixes #3979" } ], "files": [ { "diff": "@@ -671,6 +671,10 @@ public String[] convertFromWildcards(String[] aliasesOrIndices, boolean wildcard\n }\n boolean add = true;\n if (aliasOrIndex.charAt(0) == '+') {\n+ // if its the first, add empty result set\n+ if (i == 0) {\n+ result = new HashSet<String>();\n+ }\n add = true;\n aliasOrIndex = aliasOrIndex.substring(1);\n } else if (aliasOrIndex.charAt(0) == '-') {", "filename": "src/main/java/org/elasticsearch/cluster/metadata/MetaData.java", "status": "modified" }, { "diff": "@@ -27,7 +27,6 @@\n import org.junit.Test;\n \n import static com.google.common.collect.Sets.newHashSet;\n-import static org.hamcrest.MatcherAssert.assertThat;\n import static org.hamcrest.Matchers.equalTo;\n \n /**\n@@ -61,46 +60,48 @@ public void convertWildcardsTests() {\n assertThat(newHashSet(md.convertFromWildcards(new String[]{\"testYY*\", \"alias*\"}, true, IgnoreIndices.MISSING)), equalTo(newHashSet(\"alias1\", \"alias2\", \"alias3\", \"testYYY\")));\n assertThat(newHashSet(md.convertFromWildcards(new String[]{\"-kuku\"}, true, IgnoreIndices.MISSING)), equalTo(newHashSet(\"testXXX\", \"testXYY\", \"testYYY\")));\n assertThat(newHashSet(md.convertFromWildcards(new String[]{\"+test*\", \"-testYYY\"}, true, IgnoreIndices.MISSING)), equalTo(newHashSet(\"testXXX\", \"testXYY\")));\n+ assertThat(newHashSet(md.convertFromWildcards(new String[]{\"+testX*\", \"+testYYY\"}, true, IgnoreIndices.MISSING)), equalTo(newHashSet(\"testXXX\", \"testXYY\", \"testYYY\")));\n+ assertThat(newHashSet(md.convertFromWildcards(new String[]{\"+testYYY\", \"+testX*\"}, true, IgnoreIndices.MISSING)), equalTo(newHashSet(\"testXXX\", \"testXYY\", \"testYYY\")));\n }\n \n private IndexMetaData.Builder indexBuilder(String index) {\n return IndexMetaData.builder(index).settings(ImmutableSettings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));\n }\n- \n+\n @Test(expected = IndexMissingException.class)\n public void concreteIndicesIgnoreIndicesOneMissingIndex() {\n- \t MetaData.Builder mdBuilder = MetaData.builder()\n- .put(indexBuilder(\"testXXX\"))\n- .put(indexBuilder(\"kuku\"));\n- MetaData md = mdBuilder.build();\n- md.concreteIndices(new String[]{\"testZZZ\"}, IgnoreIndices.MISSING, true);\n+ MetaData.Builder mdBuilder = MetaData.builder()\n+ .put(indexBuilder(\"testXXX\"))\n+ .put(indexBuilder(\"kuku\"));\n+ MetaData md = mdBuilder.build();\n+ md.concreteIndices(new String[]{\"testZZZ\"}, IgnoreIndices.MISSING, true);\n }\n- \n+\n @Test\n public void concreteIndicesIgnoreIndicesOneMissingIndexOtherFound() {\n- \t MetaData.Builder mdBuilder = MetaData.builder()\n- .put(indexBuilder(\"testXXX\"))\n- .put(indexBuilder(\"kuku\"));\n- MetaData md = mdBuilder.build();\n- assertThat(newHashSet(md.concreteIndices(new String[]{\"testXXX\",\"testZZZ\"}, IgnoreIndices.MISSING, true)), equalTo(newHashSet(\"testXXX\")));\n+ MetaData.Builder mdBuilder = MetaData.builder()\n+ .put(indexBuilder(\"testXXX\"))\n+ .put(indexBuilder(\"kuku\"));\n+ MetaData md = mdBuilder.build();\n+ assertThat(newHashSet(md.concreteIndices(new String[]{\"testXXX\", \"testZZZ\"}, IgnoreIndices.MISSING, true)), equalTo(newHashSet(\"testXXX\")));\n }\n \n @Test(expected = IndexMissingException.class)\n public void concreteIndicesIgnoreIndicesAllMissing() {\n- \t MetaData.Builder mdBuilder = MetaData.builder()\n- .put(indexBuilder(\"testXXX\"))\n- 
.put(indexBuilder(\"kuku\"));\n- MetaData md = mdBuilder.build();\n- assertThat(newHashSet(md.concreteIndices(new String[]{\"testMo\",\"testMahdy\"}, IgnoreIndices.MISSING, true)), equalTo(newHashSet(\"testXXX\")));\n+ MetaData.Builder mdBuilder = MetaData.builder()\n+ .put(indexBuilder(\"testXXX\"))\n+ .put(indexBuilder(\"kuku\"));\n+ MetaData md = mdBuilder.build();\n+ assertThat(newHashSet(md.concreteIndices(new String[]{\"testMo\", \"testMahdy\"}, IgnoreIndices.MISSING, true)), equalTo(newHashSet(\"testXXX\")));\n }\n- \n+\n @Test\n public void concreteIndicesIgnoreIndicesEmptyRequest() {\n- \t MetaData.Builder mdBuilder = MetaData.builder()\n- .put(indexBuilder(\"testXXX\"))\n- .put(indexBuilder(\"kuku\"));\n- MetaData md = mdBuilder.build();\n- assertThat(newHashSet(md.concreteIndices(new String[]{}, IgnoreIndices.MISSING, true)), equalTo(Sets.<String>newHashSet(\"kuku\",\"testXXX\")));\n+ MetaData.Builder mdBuilder = MetaData.builder()\n+ .put(indexBuilder(\"testXXX\"))\n+ .put(indexBuilder(\"kuku\"));\n+ MetaData md = mdBuilder.build();\n+ assertThat(newHashSet(md.concreteIndices(new String[]{}, IgnoreIndices.MISSING, true)), equalTo(Sets.<String>newHashSet(\"kuku\", \"testXXX\")));\n }\n \n @Test", "filename": "src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java", "status": "modified" } ] }
{ "body": "A lenient multi_match query with a boosted field with type mismatch crashes. Simple example:\n\n`curl -XPUT http://localhost:9200/blog/post/1?pretty=1 -d '{\"foo\":123, \"bar\":\"xyzzy\"}'`\n`curl -XGET http://localhost:9200/blog/post/_count?pretty=1 -d '{\"multi_match\": {\"fields\": [\"foo^2\", \"bar\"], \"lenient\": true, \"query\": \"xyzzy\"}}' # crashes with NullPointerException`\n\nInterestingly, it works for internal _id field:\n\n`curl -XGET http://localhost:9200/blog/post/_count?pretty=1 -d '{\"multi_match\": {\"fields\": [\"_id^2\", \"bar\"], \"lenient\": true, \"query\": \"xyzzy\"}}' # works`\n\nAnd it doesn't crash when there's no type mismatch:\n\n`curl -XGET http://localhost:9200/blog/post/_count?pretty=1 -d '{\"multi_match\": {\"fields\": [\"foo^2\", \"bar\"], \"lenient\": true, \"query\": \"123\"}}' # works`\n\nOther queries for reference:\n\n`curl -XGET http://localhost:9200/blog/post/_count?pretty=1 -d '{\"multi_match\": {\"fields\": [\"foo\", \"bar\"], \"lenient\": true, \"query\": \"xyzzy\"}}' # works`\n`curl -XGET http://localhost:9200/blog/post/_count?pretty=1 -d '{\"multi_match\": {\"fields\": [\"foo\", \"bar\"], \"query\": \"xyzzy\"}}' # crashes with NumberFormatException - expected`\n", "comments": [ { "body": "More information about error:\n\nHappen the error in class LongFieldMapper.java in the method parsevalue(). The cause of error is a NumberFormatExpcetion, becuase the value(xyzzy) arrive in the method is String.\n\njava.lang.NumberFormatException: For input string: \"xyzzy\"\n", "created_at": "2013-09-27T23:21:51Z" } ], "number": 3797, "title": "multi_match lenient query with boosted field crashes with NullPointerException" }
{ "body": "I changed the class MultiMatchQuery for support the issue #3797\n", "number": 3968, "review_comments": [], "title": "Resolved issue #3797" }
{ "commits": [ { "message": "Issue resolved #3797\n\nI changed the class MultiMatchQuery for support the issue #3797" } ], "files": [ { "diff": "@@ -46,34 +46,41 @@ public MultiMatchQuery(QueryParseContext parseContext) {\n super(parseContext);\n }\n \n- private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch) throws IOException {\n- Query query = parse(type, fieldName, value);\n+ private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException {\n+ Query query;\n+ \n+ try {\n+ query = parse(type, fieldName, value);\n+ } catch (NumberFormatException e) {\n+ query = null;\n+ }\n+ \n if (query instanceof BooleanQuery) {\n Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);\n }\n+ \n+ if (boostValue != null && query != null) {\n+ query.setBoost(boostValue);\n+ }\n+ \n return query;\n }\n \n public Query parse(Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException {\n if (fieldNames.size() == 1) {\n Map.Entry<String, Float> fieldBoost = fieldNames.entrySet().iterator().next();\n Float boostValue = fieldBoost.getValue();\n- final Query query = parseAndApply(type, fieldBoost.getKey(), value, minimumShouldMatch);\n- if (boostValue != null) {\n- query.setBoost(boostValue);\n- }\n+ final Query query = parseAndApply(type, fieldBoost.getKey(), value, minimumShouldMatch, boostValue);\n+\n return query;\n }\n \n if (useDisMax) {\n DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(tieBreaker);\n boolean clauseAdded = false;\n for (String fieldName : fieldNames.keySet()) {\n- Query query = parseAndApply(type, fieldName, value, minimumShouldMatch);\n Float boostValue = fieldNames.get(fieldName);\n- if (boostValue != null) {\n- query.setBoost(boostValue);\n- }\n+ Query query = parseAndApply(type, fieldName, value, minimumShouldMatch, boostValue);\n \n if (query != null) {\n clauseAdded = true;\n@@ -84,11 +91,9 @@ public Query parse(Type type, Map<String, Float> fieldNames, Object value, Strin\n } else {\n BooleanQuery booleanQuery = new BooleanQuery();\n for (String fieldName : fieldNames.keySet()) {\n- Query query = parseAndApply(type, fieldName, value, minimumShouldMatch);\n Float boostValue = fieldNames.get(fieldName);\n- if (boostValue != null) {\n- query.setBoost(boostValue);\n- }\n+ Query query = parseAndApply(type, fieldName, value, minimumShouldMatch, boostValue);\n+ \n if (query != null) {\n booleanQuery.add(query, BooleanClause.Occur.SHOULD);\n }", "filename": "src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java", "status": "modified" }, { "diff": "@@ -60,7 +60,38 @@\n */\n public class SimpleQueryTests extends AbstractIntegrationTest {\n \n+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3797\n+ public void testMultiMatchLenientIssue3797() {\n+ createIndex(\"test\");\n+ ensureGreen();\n+ client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"field1\", 123, \"field2\", \"value2\").get();\n+ refresh();\n \n+ SearchResponse searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(QueryBuilders.multiMatchQuery(\"value2\", \"field1^2\", \"field2\").lenient(true).useDisMax(false)).get();\n+ assertHitCount(searchResponse, 1l);\n+\n+ searchResponse = client().prepareSearch(\"test\")\n+ .setQuery(QueryBuilders.multiMatchQuery(\"value2\", \"field1^2\", \"field2\").lenient(true).useDisMax(true)).get();\n+ assertHitCount(searchResponse, 1l);\n+ \n+ \n+ searchResponse = 
client().prepareSearch().setQuery(QueryBuilders.multiMatchQuery(\"value2\", \"field1^2\", \"field2\").lenient(true)).execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ \n+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.multiMatchQuery(\"value2\", \"_id^2\", \"field2\").lenient(true)).execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ \n+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.multiMatchQuery(\"123\", \"field1^2\", \"field2\").lenient(true)).execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ \n+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.multiMatchQuery(\"value2\", \"field1\", \"field2\").lenient(true)).execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ \n+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.multiMatchQuery(\"value2\", \"field1\", \"field2\")).execute().actionGet();\n+ assertHitCount(searchResponse, 1l);\n+ }\n+ \n @Test // see https://github.com/elasticsearch/elasticsearch/issues/3177\n public void testIssue3177() {\n run(prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1)));", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
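A minimal REST reproduction of the fixed case, taken from the report above (a sketch assuming a local node on localhost:9200): with the change applied, the lenient multi_match with a boosted, type-mismatched field should return a count of 1 instead of failing with a NullPointerException.

```sh
# Index a document whose "foo" field is numeric, refresh, then run the
# lenient multi_match with a boosted field over both fields.
curl -XPUT "http://localhost:9200/blog/post/1?pretty=1" -d '{"foo":123, "bar":"xyzzy"}'
curl -XPOST "http://localhost:9200/blog/_refresh?pretty=1"
curl -XGET "http://localhost:9200/blog/post/_count?pretty=1" -d '{"multi_match": {"fields": ["foo^2", "bar"], "lenient": true, "query": "xyzzy"}}'
```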
{ "body": "once you set `\"cluster.routing.allocation.awareness.attributes\"` via the API you can't reset it to an empty list which would disable the awareness entirely. This happens since the `Settings.getAsArray` returns the default if the list is null or empty which essentially prevents an update to an empty list.\n", "comments": [], "number": 3931, "title": "Awareness attributes can't be reset once they are set." }
{ "body": "Currently we don't allow resetting the awareness\nattribute via the API since it requires at least one\nnon-empty string to update the setting. This commit\nallows resetting this using an empty string.\n\nCloses #3931\n", "number": 3935, "review_comments": [], "title": "Allow awareness attributes to be reset via the API" }
{ "commits": [ { "message": "Allow awareness attributes to be reset via the API\n\nCurrently we don't allow resetting the awareness\nattribute via the API since it requires at least one\nnon-empty string to update the setting. This commit\nallows resetting this using an empty string.\n\nCloses #3931" } ], "files": [ { "diff": "@@ -26,6 +26,7 @@\n import org.elasticsearch.cluster.routing.RoutingNode;\n import org.elasticsearch.cluster.routing.ShardRouting;\n import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;\n+import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.Settings;\n@@ -88,6 +89,9 @@ class ApplySettings implements NodeSettingsService.Listener {\n @Override\n public void onRefreshSettings(Settings settings) {\n String[] awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null);\n+ if (awarenessAttributes == null && \"\".equals(settings.get(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null))) {\n+ awarenessAttributes = Strings.EMPTY_ARRAY; // the empty string resets this\n+ }\n if (awarenessAttributes != null) {\n logger.info(\"updating [cluster.routing.allocation.awareness.attributes] from [{}] to [{}]\", AwarenessAllocationDecider.this.awarenessAttributes, awarenessAttributes);\n AwarenessAllocationDecider.this.awarenessAttributes = awarenessAttributes;", "filename": "src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java", "status": "modified" }, { "diff": "@@ -179,5 +179,52 @@ public void testAwarenessZonesIncrementalNodes() throws InterruptedException {\n assertThat(counts.get(A_0), equalTo(5));\n assertThat(counts.get(B_0), equalTo(3));\n assertThat(counts.get(B_1), equalTo(2));\n+ \n+ String noZoneNode = cluster().startNode();\n+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(\"4\").execute().actionGet();\n+ assertThat(health.isTimedOut(), equalTo(false));\n+ client().admin().cluster().prepareReroute().get();\n+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(\"4\").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();\n+\n+ assertThat(health.isTimedOut(), equalTo(false));\n+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();\n+\n+ counts = new ObjectIntOpenHashMap<String>();\n+\n+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {\n+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {\n+ for (ShardRouting shardRouting : indexShardRoutingTable) {\n+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);\n+ }\n+ }\n+ }\n+ \n+ assertThat(counts.get(A_0), equalTo(5));\n+ assertThat(counts.get(B_0), equalTo(3));\n+ assertThat(counts.get(B_1), equalTo(2));\n+ assertThat(counts.containsKey(noZoneNode), equalTo(false));\n+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(ImmutableSettings.settingsBuilder().put(\"cluster.routing.allocation.awareness.attributes\", \"\").build()).get();\n+ \n+ \n+ client().admin().cluster().prepareReroute().get();\n+ health = 
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(\"4\").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();\n+\n+ assertThat(health.isTimedOut(), equalTo(false));\n+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();\n+\n+ counts = new ObjectIntOpenHashMap<String>();\n+\n+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {\n+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {\n+ for (ShardRouting shardRouting : indexShardRoutingTable) {\n+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);\n+ }\n+ }\n+ }\n+ \n+ assertThat(counts.get(A_0), equalTo(3));\n+ assertThat(counts.get(B_0), equalTo(3));\n+ assertThat(counts.get(B_1), equalTo(2));\n+ assertThat(counts.get(noZoneNode), equalTo(2));\n }\n }", "filename": "src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java", "status": "modified" } ] }
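A hedged sketch of what the reset looks like over the REST API (assuming a local node on localhost:9200): the change interprets an empty string value as clearing the awareness attributes, so shard allocation stops being awareness-constrained.

```sh
# Reset the awareness attributes by updating the transient setting to an
# empty string; with this change the empty value is treated as a reset.
curl -XPUT "http://localhost:9200/_cluster/settings" -d '
{
  "transient": {
    "cluster.routing.allocation.awareness.attributes": ""
  }
}'
```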
{ "body": "If you submit an indexing request and specify a parent but the mapping doesn't define a parent type, we currently silently ignore the parent value: \n\n```\ncurl -XPUT \"http://localhost:9200/test/type/1?parent=1\" -d'\n{\n \"field\": \"value\" \n}'\n```\n\nWe should reject the request.\n\nThe breaking part: Index requests with the parent flag will be rejected if there is no `_parent` metadata field in the mapping.\n", "comments": [], "number": 3848, "title": "Reject indexing requests which specify a parent, if no parent type is defined" }
{ "body": "Prohibit indexing a document with parent for a type that doesn't have a `_parent` field configured.\n\nRelates to #3848\n", "number": 3905, "review_comments": [], "title": "Prohibit indexing a document with parent for a non child type." }
{ "commits": [ { "message": "Prohibit indexing a document with parent for a type that doesn't have a `_parent` field configured.\n\nCloses #3848" }, { "message": "Added check that no rootMappers can be added. This will prohibit adding a _parent field to an existing mapping.\nAlso serialize the hasParentField property." } ], "files": [ { "diff": "@@ -578,6 +578,14 @@ public void process(MetaData metaData, String aliasOrIndex, @Nullable MappingMet\n if (mappingMd.routing().required() && routing == null) {\n throw new RoutingMissingException(index, type, id);\n }\n+\n+ if (parent != null && !mappingMd.hasParentField()) {\n+ throw new ElasticSearchIllegalArgumentException(\"Can't specify parent if no parent field has been configured\");\n+ }\n+ } else {\n+ if (parent != null) {\n+ throw new ElasticSearchIllegalArgumentException(\"Can't specify parent if no parent field has been configured\");\n+ }\n }\n \n // generate id if not already provided and id generation is allowed", "filename": "src/main/java/org/elasticsearch/action/index/IndexRequest.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.cluster.metadata;\n \n import org.elasticsearch.ElasticSearchIllegalStateException;\n+import org.elasticsearch.Version;\n import org.elasticsearch.action.TimestampParsingException;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n@@ -258,13 +259,15 @@ public int hashCode() {\n private Id id;\n private Routing routing;\n private Timestamp timestamp;\n+ private boolean hasParentField;\n \n public MappingMetaData(DocumentMapper docMapper) {\n this.type = docMapper.type();\n this.source = docMapper.mappingSource();\n this.id = new Id(docMapper.idFieldMapper().path());\n this.routing = new Routing(docMapper.routingFieldMapper().required(), docMapper.routingFieldMapper().path());\n this.timestamp = new Timestamp(docMapper.timestampFieldMapper().enabled(), docMapper.timestampFieldMapper().path(), docMapper.timestampFieldMapper().dateTimeFormatter().format());\n+ this.hasParentField = docMapper.parentFieldMapper().active();\n }\n \n public MappingMetaData(CompressedString mapping) throws IOException {\n@@ -344,14 +347,20 @@ private void initMappers(Map<String, Object> withoutType) {\n } else {\n this.timestamp = Timestamp.EMPTY;\n }\n+ if (withoutType.containsKey(\"_parent\")) {\n+ this.hasParentField = true;\n+ } else {\n+ this.hasParentField = false;\n+ }\n }\n \n- public MappingMetaData(String type, CompressedString source, Id id, Routing routing, Timestamp timestamp) {\n+ public MappingMetaData(String type, CompressedString source, Id id, Routing routing, Timestamp timestamp, boolean hasParentField) {\n this.type = type;\n this.source = source;\n this.id = id;\n this.routing = routing;\n this.timestamp = timestamp;\n+ this.hasParentField = hasParentField;\n }\n \n void updateDefaultMapping(MappingMetaData defaultMapping) {\n@@ -374,6 +383,10 @@ public CompressedString source() {\n return this.source;\n }\n \n+ public boolean hasParentField() {\n+ return hasParentField;\n+ }\n+\n /**\n * Converts the serialized compressed form of the mappings into a parsed map.\n */\n@@ -516,6 +529,9 @@ public static void writeTo(MappingMetaData mappingMd, StreamOutput out) throws I\n out.writeBoolean(false);\n }\n out.writeString(mappingMd.timestamp().format());\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeBoolean(mappingMd.hasParentField());\n+ }\n }\n \n @Override\n@@ -553,7 +569,13 @@ public static MappingMetaData 
readFrom(StreamInput in) throws IOException {\n Routing routing = new Routing(in.readBoolean(), in.readBoolean() ? in.readString() : null);\n // timestamp\n Timestamp timestamp = new Timestamp(in.readBoolean(), in.readBoolean() ? in.readString() : null, in.readString());\n- return new MappingMetaData(type, source, id, routing, timestamp);\n+ final boolean hasParentField;\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ hasParentField = in.readBoolean();\n+ } else {\n+ hasParentField = true; // We assume here that the type has a parent field, which is confirm with the behaviour of <= 0.90.5\n+ }\n+ return new MappingMetaData(type, source, id, routing, timestamp, hasParentField);\n }\n \n public static class ParseContext {", "filename": "src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java", "status": "modified" }, { "diff": "@@ -123,7 +123,7 @@ public void refresh(List<AtomicReaderContext> atomicReaderContexts) throws IOExc\n BytesRef spare = new BytesRef();\n for (String type : indexService.mapperService().types()) {\n ParentFieldMapper parentFieldMapper = indexService.mapperService().documentMapper(type).parentFieldMapper();\n- if (parentFieldMapper != null) {\n+ if (parentFieldMapper.active()) {\n parentTypes.add(new HashedBytesArray(Strings.toUTF8Bytes(parentFieldMapper.type(), spare)));\n }\n }", "filename": "src/main/java/org/elasticsearch/index/cache/id/simple/SimpleIdCache.java", "status": "modified" }, { "diff": "@@ -226,7 +226,7 @@ public GetResult innerGet(String type, String id, String[] gFields, boolean real\n Object value = null;\n if (field.equals(RoutingFieldMapper.NAME) && docMapper.routingFieldMapper().fieldType().stored()) {\n value = source.routing;\n- } else if (field.equals(ParentFieldMapper.NAME) && docMapper.parentFieldMapper() != null && docMapper.parentFieldMapper().fieldType().stored()) {\n+ } else if (field.equals(ParentFieldMapper.NAME) && docMapper.parentFieldMapper().active() && docMapper.parentFieldMapper().fieldType().stored()) {\n value = source.parent;\n } else if (field.equals(TimestampFieldMapper.NAME) && docMapper.timestampFieldMapper().fieldType().stored()) {\n value = source.timestamp;", "filename": "src/main/java/org/elasticsearch/index/get/ShardGetService.java", "status": "modified" }, { "diff": "@@ -177,7 +177,7 @@ public Builder(String index, @Nullable Settings indexSettings, RootObjectMapper.\n this.rootMappers.put(TimestampFieldMapper.class, new TimestampFieldMapper());\n this.rootMappers.put(TTLFieldMapper.class, new TTLFieldMapper());\n this.rootMappers.put(VersionFieldMapper.class, new VersionFieldMapper());\n- // don't add parent field, by default its \"null\"\n+ this.rootMappers.put(ParentFieldMapper.class, new ParentFieldMapper());\n }\n \n public Builder meta(ImmutableMap<String, Object> meta) {\n@@ -306,7 +306,7 @@ public DocumentMapper(String index, @Nullable Settings indexSettings, DocumentMa\n \n this.typeFilter = typeMapper().termFilter(type, null);\n \n- if (rootMapper(ParentFieldMapper.class) != null) {\n+ if (rootMapper(ParentFieldMapper.class).active()) {\n // mark the routing field mapper as required\n rootMapper(RoutingFieldMapper.class).markAsRequired();\n }\n@@ -631,8 +631,12 @@ public void traverse(ObjectMapperListener listener) {\n \n public synchronized MergeResult merge(DocumentMapper mergeWith, MergeFlags mergeFlags) {\n MergeContext mergeContext = new MergeContext(this, mergeFlags);\n- rootObjectMapper.merge(mergeWith.rootObjectMapper, mergeContext);\n+ if (rootMappers.size() != 
mergeWith.rootMappers.size()) {\n+ mergeContext.addConflict(\"Adding root mapper for type isn't allowed\");\n+ return new MergeResult(mergeContext.buildConflicts());\n+ }\n \n+ rootObjectMapper.merge(mergeWith.rootObjectMapper, mergeContext);\n for (Map.Entry<Class<? extends RootMapper>, RootMapper> entry : rootMappers.entrySet()) {\n // root mappers included in root object will get merge in the rootObjectMapper\n if (entry.getValue().includeInObject()) {", "filename": "src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java", "status": "modified" }, { "diff": "@@ -130,6 +130,13 @@ protected ParentFieldMapper(String name, String indexName, String type, Postings\n this.typeAsBytes = new BytesRef(type);\n }\n \n+ public ParentFieldMapper() {\n+ super(new Names(Defaults.NAME, Defaults.NAME, Defaults.NAME, Defaults.NAME), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE),\n+ Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, null, null, null, null, null);\n+ type = null;\n+ typeAsBytes = null;\n+ }\n+\n public String type() {\n return type;\n }\n@@ -169,6 +176,10 @@ public boolean includeInObject() {\n \n @Override\n protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {\n+ if (!active()) {\n+ return;\n+ }\n+\n if (context.parser().currentName() != null && context.parser().currentName().equals(Defaults.NAME)) {\n // we are in the parsing of _parent phase\n String parentId = context.parser().text();\n@@ -253,7 +264,7 @@ public Filter termFilter(Object value, @Nullable QueryParseContext context) {\n \n List<String> types = new ArrayList<String>(context.mapperService().types().size());\n for (DocumentMapper documentMapper : context.mapperService()) {\n- if (documentMapper.parentFieldMapper() == null) {\n+ if (!documentMapper.parentFieldMapper().active()) {\n types.add(documentMapper.type());\n }\n }\n@@ -284,7 +295,7 @@ public Filter termsFilter(List values, @Nullable QueryParseContext context) {\n \n List<String> types = new ArrayList<String>(context.mapperService().types().size());\n for (DocumentMapper documentMapper : context.mapperService()) {\n- if (documentMapper.parentFieldMapper() == null) {\n+ if (!documentMapper.parentFieldMapper().active()) {\n types.add(documentMapper.type());\n }\n }\n@@ -319,6 +330,10 @@ protected String contentType() {\n \n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n+ if (!active()) {\n+ return builder;\n+ }\n+\n builder.startObject(CONTENT_TYPE);\n builder.field(\"type\", type);\n builder.endObject();\n@@ -327,6 +342,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n \n @Override\n public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {\n- // do nothing here, no merging, but also no exception\n+ ParentFieldMapper other = (ParentFieldMapper) mergeWith;\n+ if (active() == other.active()) {\n+ return;\n+ }\n+\n+ if (active() != other.active() || !type.equals(other.type)) {\n+ mergeContext.addConflict(\"The _parent field can't be added or updated\");\n+ }\n }\n+\n+ /**\n+ * @return Whether the _parent field is actually used.\n+ */\n+ public boolean active() {\n+ return type != null;\n+ }\n+\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java", "status": "modified" }, { "diff": "@@ -121,7 +121,7 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n if (childDocMapper == null) {\n throw new 
QueryParsingException(parseContext.index(), \"No mapping for for type [\" + childType + \"]\");\n }\n- if (childDocMapper.parentFieldMapper() == null) {\n+ if (!childDocMapper.parentFieldMapper().active()) {\n throw new QueryParsingException(parseContext.index(), \"Type [\" + childType + \"] does not have parent mapping\");\n }\n String parentType = childDocMapper.parentFieldMapper().type();", "filename": "src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java", "status": "modified" }, { "diff": "@@ -123,7 +123,7 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n if (childDocMapper == null) {\n throw new QueryParsingException(parseContext.index(), \"[has_child] No mapping for for type [\" + childType + \"]\");\n }\n- if (childDocMapper.parentFieldMapper() == null) {\n+ if (!childDocMapper.parentFieldMapper().active()) {\n throw new QueryParsingException(parseContext.index(), \"[has_child] Type [\" + childType + \"] does not have parent mapping\");\n }\n String parentType = childDocMapper.parentFieldMapper().type();", "filename": "src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java", "status": "modified" }, { "diff": "@@ -135,7 +135,7 @@ public Filter parse(QueryParseContext parseContext) throws IOException, QueryPar\n parentTypes.add(parentType);\n for (DocumentMapper documentMapper : parseContext.mapperService()) {\n ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();\n- if (parentFieldMapper != null) {\n+ if (parentFieldMapper.active()) {\n DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());\n if (parentTypeDocumentMapper == null) {\n // Only add this, if this parentFieldMapper (also a parent) isn't a child of another parent.", "filename": "src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java", "status": "modified" }, { "diff": "@@ -134,7 +134,7 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n parentTypes.add(parentType);\n for (DocumentMapper documentMapper : parseContext.mapperService()) {\n ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();\n- if (parentFieldMapper != null) {\n+ if (parentFieldMapper.active()) {\n DocumentMapper parentTypeDocumentMapper = parseContext.mapperService().documentMapper(parentFieldMapper.type());\n if (parentTypeDocumentMapper == null) {\n // Only add this, if this parentFieldMapper (also a parent) isn't a child of another parent.", "filename": "src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java", "status": "modified" }, { "diff": "@@ -120,7 +120,7 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n if (childDocMapper == null) {\n throw new QueryParsingException(parseContext.index(), \"No mapping for for type [\" + childType + \"]\");\n }\n- if (childDocMapper.parentFieldMapper() == null) {\n+ if (!childDocMapper.parentFieldMapper().active()) {\n throw new QueryParsingException(parseContext.index(), \"Type [\" + childType + \"] does not have parent mapping\");\n }\n String parentType = childDocMapper.parentFieldMapper().type();", "filename": "src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java", "status": "modified" }, { "diff": "@@ -26,7 +26,6 @@\n import org.junit.Test;\n \n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n-import static org.hamcrest.MatcherAssert.assertThat;\n import static org.hamcrest.Matchers.equalTo;\n import 
static org.hamcrest.Matchers.nullValue;\n \n@@ -37,7 +36,7 @@ public void testParseIdAlone() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"id\"),\n new MappingMetaData.Routing(true, \"routing\"),\n- new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .field(\"id\", \"id\").field(\"routing\", \"routing_value\").field(\"timestamp\", \"1\").endObject().bytes().toBytes();\n MappingMetaData.ParseContext parseContext = md.createParseContext(null, \"routing_value\", \"1\");\n@@ -55,7 +54,7 @@ public void testFailIfIdIsNoValue() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"id\"),\n new MappingMetaData.Routing(true, \"routing\"),\n- new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startArray(\"id\").value(\"id\").endArray().field(\"routing\", \"routing_value\").field(\"timestamp\", \"1\").endObject().bytes().toBytes();\n MappingMetaData.ParseContext parseContext = md.createParseContext(null, \"routing_value\", \"1\");\n@@ -82,7 +81,7 @@ public void testParseRoutingAlone() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"id\"),\n new MappingMetaData.Routing(true, \"routing\"),\n- new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .field(\"id\", \"id\").field(\"routing\", \"routing_value\").field(\"timestamp\", \"1\").endObject().bytes().toBytes();\n MappingMetaData.ParseContext parseContext = md.createParseContext(\"id\", null, \"1\");\n@@ -100,7 +99,7 @@ public void testParseTimestampAlone() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"id\"),\n new MappingMetaData.Routing(true, \"routing\"),\n- new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .field(\"id\", \"id\").field(\"routing\", \"routing_value\").field(\"timestamp\", \"1\").endObject().bytes().toBytes();\n MappingMetaData.ParseContext parseContext = md.createParseContext(\"id\", \"routing_value1\", null);\n@@ -118,7 +117,7 @@ public void testParseIdAndRoutingAndTimestamp() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"id\"),\n new MappingMetaData.Routing(true, \"routing\"),\n- new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .field(\"id\", \"id\").field(\"routing\", 
\"routing_value\").field(\"timestamp\", \"1\").endObject().bytes().toBytes();\n MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);\n@@ -133,7 +132,7 @@ public void testParseIdAndRoutingAndTimestampWithPath() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"obj1.id\"),\n new MappingMetaData.Routing(true, \"obj1.routing\"),\n- new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startObject(\"obj0\").field(\"field1\", \"value1\").field(\"field2\", \"value2\").endObject()\n .startObject(\"obj1\").field(\"id\", \"id\").field(\"routing\", \"routing_value\").endObject()\n@@ -151,7 +150,7 @@ public void testParseIdWithPath() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"obj1.id\"),\n new MappingMetaData.Routing(true, \"obj1.routing\"),\n- new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startObject(\"obj0\").field(\"field1\", \"value1\").field(\"field2\", \"value2\").endObject()\n .startObject(\"obj1\").field(\"id\", \"id\").field(\"routing\", \"routing_value\").endObject()\n@@ -172,7 +171,7 @@ public void testParseRoutingWithPath() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"obj1.id\"),\n new MappingMetaData.Routing(true, \"obj1.routing\"),\n- new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startObject(\"obj0\").field(\"field1\", \"value1\").field(\"field2\", \"value2\").endObject()\n .startObject(\"obj1\").field(\"id\", \"id\").field(\"routing\", \"routing_value\").endObject()\n@@ -193,7 +192,7 @@ public void testParseTimestampWithPath() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"obj1.id\"),\n new MappingMetaData.Routing(true, \"obj1.routing\"),\n- new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"obj2.timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startObject(\"obj0\").field(\"field1\", \"value1\").field(\"field2\", \"value2\").endObject()\n .startObject(\"obj1\").field(\"routing\", \"routing_value\").endObject()\n@@ -214,7 +213,7 @@ public void testParseIdAndRoutingAndTimestampWithinSamePath() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"obj1.id\"),\n new MappingMetaData.Routing(true, \"obj1.routing\"),\n- new MappingMetaData.Timestamp(true, \"obj1.timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"obj1.timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = 
jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startObject(\"obj0\").field(\"field1\", \"value1\").field(\"field2\", \"value2\").endObject()\n .startObject(\"obj1\").field(\"id\", \"id\").field(\"routing\", \"routing_value\").field(\"timestamp\", \"1\").endObject()\n@@ -232,7 +231,7 @@ public void testParseIdAndRoutingAndTimestampWithinSamePathAndMoreLevels() throw\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"obj1.obj0.id\"),\n new MappingMetaData.Routing(true, \"obj1.obj2.routing\"),\n- new MappingMetaData.Timestamp(true, \"obj1.obj3.timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"obj1.obj3.timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startObject(\"obj0\").field(\"field1\", \"value1\").field(\"field2\", \"value2\").endObject()\n .startObject(\"obj1\")\n@@ -261,7 +260,7 @@ public void testParseIdAndRoutingAndTimestampWithSameRepeatedObject() throws Exc\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"obj1.id\"),\n new MappingMetaData.Routing(true, \"obj1.routing\"),\n- new MappingMetaData.Timestamp(true, \"obj1.timestamp\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"obj1.timestamp\", \"dateOptionalTime\"), false);\n byte[] bytes = jsonBuilder().startObject().field(\"field1\", \"value1\").field(\"field2\", \"value2\")\n .startObject(\"obj0\").field(\"field1\", \"value1\").field(\"field2\", \"value2\").endObject()\n .startObject(\"obj1\").field(\"id\", \"id\").endObject()\n@@ -281,7 +280,7 @@ public void testParseIdRoutingTimestampWithRepeatedField() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"field1\"),\n new MappingMetaData.Routing(true, \"field1.field1\"),\n- new MappingMetaData.Timestamp(true, \"field1\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"field1\", \"dateOptionalTime\"), false);\n \n byte[] bytes = jsonBuilder().startObject()\n .field(\"aaa\", \"wr\")\n@@ -304,7 +303,7 @@ public void testParseNoIdRoutingWithRepeatedFieldAndObject() throws Exception {\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(\"id\"),\n new MappingMetaData.Routing(true, \"field1.field1.field2\"),\n- new MappingMetaData.Timestamp(true, \"field1\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"field1\", \"dateOptionalTime\"), false);\n \n byte[] bytes = jsonBuilder().startObject()\n .field(\"aaa\", \"wr\")\n@@ -327,7 +326,7 @@ public void testParseRoutingWithRepeatedFieldAndValidRouting() throws Exception\n MappingMetaData md = new MappingMetaData(\"type1\", new CompressedString(\"\"),\n new MappingMetaData.Id(null),\n new MappingMetaData.Routing(true, \"field1.field2\"),\n- new MappingMetaData.Timestamp(true, \"field1\", \"dateOptionalTime\"));\n+ new MappingMetaData.Timestamp(true, \"field1\", \"dateOptionalTime\"), false);\n \n byte[] bytes = jsonBuilder().startObject()\n .field(\"aaa\", \"wr\")", "filename": "src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java", "status": "modified" }, { "diff": "@@ -24,8 +24,8 @@\n import org.elasticsearch.test.ElasticsearchTestCase;\n import org.junit.Test;\n \n-import static org.hamcrest.MatcherAssert.assertThat;\n import static 
org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.nullValue;\n \n /**\n *\n@@ -46,8 +46,8 @@ public void parentNotMapped() throws Exception {\n .bytes()).type(\"type\").id(\"1\"));\n \n // no _parent mapping, used as a simple field\n- assertThat(doc.parent(), equalTo(null));\n- assertThat(doc.rootDoc().get(\"_parent\"), equalTo(\"1122\"));\n+ assertThat(doc.parent(), nullValue());\n+ assertThat(doc.rootDoc().get(\"_parent\"), nullValue());\n }\n \n @Test", "filename": "src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java", "status": "modified" }, { "diff": "@@ -20,6 +20,9 @@\n package org.elasticsearch.search.child;\n \n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.ElasticSearchIllegalArgumentException;\n+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;\n+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;\n import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;\n import org.elasticsearch.action.count.CountResponse;\n import org.elasticsearch.action.explain.ExplainResponse;\n@@ -30,6 +33,7 @@\n import org.elasticsearch.common.Priority;\n import org.elasticsearch.common.lucene.search.function.CombineFunction;\n import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.index.mapper.MergeMappingException;\n import org.elasticsearch.index.query.QueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.search.facet.terms.TermsFacet;\n@@ -1832,4 +1836,64 @@ public void testHasChildQueryOnlyReturnsSingleChildType() {\n assertHitCount(searchResponse, 0l);\n }\n \n+ @Test\n+ public void indexChildDocWithNoParentMapping() throws ElasticSearchException, IOException {\n+ client().admin().indices().prepareCreate(\"test\")\n+ .setSettings(\n+ ImmutableSettings.settingsBuilder()\n+ .put(\"index.number_of_shards\", 1)\n+ .put(\"index.number_of_replicas\", 0)\n+ ).execute().actionGet();\n+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n+ client().admin().indices().preparePutMapping(\"test\").setType(\"child1\").setSource(jsonBuilder().startObject().startObject(\"type\")\n+ .endObject().endObject()).execute().actionGet();\n+\n+ client().prepareIndex(\"test\", \"parent\", \"p1\").setSource(\"p_field\", \"p_value1\", \"_parent\", \"bla\").execute().actionGet();\n+ try {\n+ client().prepareIndex(\"test\", \"child1\", \"c1\").setParent(\"p1\").setSource(\"c_field\", \"blue\").execute().actionGet();\n+ fail();\n+ } catch (ElasticSearchIllegalArgumentException e) {\n+ assertThat(e.getMessage(), equalTo(\"Can't specify parent if no parent field has been configured\"));\n+ }\n+ try {\n+ client().prepareIndex(\"test\", \"child2\", \"c2\").setParent(\"p1\").setSource(\"c_field\", \"blue\").execute().actionGet();\n+ fail();\n+ } catch (ElasticSearchIllegalArgumentException e) {\n+ assertThat(e.getMessage(), equalTo(\"Can't specify parent if no parent field has been configured\"));\n+ }\n+\n+ refresh();\n+ }\n+\n+ @Test\n+ public void testAddingParentToExistingMapping() throws ElasticSearchException, IOException {\n+ client().admin().indices().prepareCreate(\"test\")\n+ .setSettings(\n+ ImmutableSettings.settingsBuilder()\n+ .put(\"index.number_of_shards\", 1)\n+ .put(\"index.number_of_replicas\", 0)\n+ ).execute().actionGet();\n+ 
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n+\n+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(\"test\").setType(\"child\").setSource(\"number\", \"type=integer\")\n+ .execute().actionGet();\n+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));\n+\n+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(\"test\").execute().actionGet();\n+ Map<String, Object> mapping = getMappingsResponse.getMappings().get(\"test\").get(\"child\").getSourceAsMap();\n+ assertThat(mapping.size(), equalTo(1));\n+ assertThat(mapping.get(\"properties\"), notNullValue());\n+\n+ try {\n+ // Adding _parent metadata field to existing mapping is prohibited:\n+ client().admin().indices().preparePutMapping(\"test\").setType(\"child\").setSource(jsonBuilder().startObject().startObject(\"type\")\n+ .startObject(\"_parent\").field(\"type\", \"parent\").endObject()\n+ .endObject().endObject()).execute().actionGet();\n+ fail();\n+ } catch (MergeMappingException e) {\n+ assertThat(e.getMessage(), equalTo(\"Merge failed with failures {[The _parent field can't be added or updated]}\"));\n+ }\n+\n+ }\n+\n }", "filename": "src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java", "status": "modified" } ] }
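A minimal sketch of the new rejection behavior over REST, reusing the request from the original report (assuming a local node on localhost:9200 and an index `test` whose type has no `_parent` mapping): the request should now fail with the message added in this change, "Can't specify parent if no parent field has been configured", rather than silently dropping the parent value.

```sh
# Indexing with a parent against a type that has no _parent mapping is
# now rejected instead of silently ignoring the parent parameter.
curl -XPUT "http://localhost:9200/test/type/1?parent=1" -d '
{
  "field": "value"
}'
```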
{ "body": "First of all, thanks for building such an amazing service. I am loving my experience with ElasticSearch so far.\n\nWhat I've run into is that trying to use the BooleanQueryBuilder in conjunction with the GeoShapeQueryBuilder is resulting in the following exception.\n\n```\norg.elasticsearch.search.builder.SearchSourceBuilderException: Failed to build search source\n at org.elasticsearch.search.builder.SearchSourceBuilder.buildAsBytes(SearchSourceBuilder.java:579)\n at org.elasticsearch.action.search.SearchRequest.source(SearchRequest.java:258)\n at org.elasticsearch.action.search.SearchRequestBuilder.doExecute(SearchRequestBuilder.java:839)\n at org.elasticsearch.action.ActionRequestBuilder.execute(ActionRequestBuilder.java:85)\n at org.elasticsearch.action.ActionRequestBuilder.execute(ActionRequestBuilder.java:59)\n at com.tomtom.lbs.vectordb.data.services.ElasticSearchService.search(ElasticSearchService.java:119)\n ... 38 more\nCaused by: org.elasticsearch.common.jackson.core.JsonGenerationException: Current context not an ARRAY but OBJECT\n at org.elasticsearch.common.jackson.core.base.GeneratorBase._reportError(GeneratorBase.java:444)\n at org.elasticsearch.common.jackson.dataformat.smile.SmileGenerator.writeEndArray(SmileGenerator.java:553)\n at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.writeEndArray(JsonXContentGenerator.java:59)\n at org.elasticsearch.common.xcontent.XContentBuilder.endArray(XContentBuilder.java:227)\n at org.elasticsearch.index.query.BoolQueryBuilder.doXArrayContent(BoolQueryBuilder.java:182)\n at org.elasticsearch.index.query.BoolQueryBuilder.doXContent(BoolQueryBuilder.java:149)\n at org.elasticsearch.index.query.BaseQueryBuilder.toXContent(BaseQueryBuilder.java:65)\n at org.elasticsearch.search.builder.SearchSourceBuilder.toXContent(SearchSourceBuilder.java:601)\n at org.elasticsearch.search.builder.SearchSourceBuilder.buildAsBytes(SearchSourceBuilder.java:576)\n ... 43 more\n```\n\nHere is my code to build and execute the query:\n\n``` java\nGeoShapeQueryBuilder geoQuery = QueryBuilders.geoShapeQuery(\"searchGeometry\", new RectangleImpl(y1, y2, x1, x2, SpatialContext.GEO));\nBoolQueryBuilder query = QueryBuilders.boolQuery();\n\nif (featureIds.length > 0)\n query.must(QueryBuilders.termQuery(\"featureType\", featureIds[0]));\n\nquery.must(geoQuery);\n\nSearchRequestBuilder request = esClient.prepareSearch(\"index_name\")\n .setSearchType(SearchType.QUERY_THEN_FETCH)\n .setQuery(query)\n .setFrom(0)\n .setSize(maxResults)\n .setExplain(false);\n\nSearchResponse response = request.execute().actionGet();\n```\n\nAm I doing something wrong? I have a feeling I am but it looks right to me.\n\nThis isn't a big deal for me, because I can just build a JSON query manually and that works perfectly fine. However it'd be nice to be able to use the QueryBuilders as make the code much easier to read.\n\nThanks!\n", "comments": [ { "body": "Hey,\n\nI can reproduce this and will take a further look at it... the combination of geoquery and term query leads to that (you get sort of a different exception when you switch term and geoquery in the must query).\n\nThanks for notifying!\n", "created_at": "2013-10-11T08:41:56Z" }, { "body": "Thanks!\n", "created_at": "2013-10-11T14:03:03Z" } ], "number": 3878, "title": "Search using BooleanQueryBuilder and GeoShapeQueryBuilder results in \"Current context not an ARRAY but OBJECT\"" }
{ "body": "After closing #3878 it came to me, that we might have other code snippets of missing or too much `builder.endObject()` / `builder.startObject()` methods (same for arrays), so I wrote this kind of hacky shell script\n\n``` sh\n#/bin/bash\nfor x in Object Array ; do\n\n start=\"\\.start$x\"\n end=\"\\.end$x\"\n\n for i in $(ag -l \"$start\") ; do\n startingCount=$(grep -c \"$start\" $i)\n endingCount=$(grep -c \"$end\" $i)\n\n if [ $startingCount -ne $endingCount ] ; then\n echo \"$i $x: start $startingCount end $endingCount\"\n fi\n done\n\ndone\n```\n\nonly found two other pieces of code and a couple of false positives.\n\nI am thinking if we could automate this by doing something similar as the `forbidden-api` maven module does and parse the source itself (to prevent false positives).\n\nAnyone having experience with that?\n", "number": 3902, "review_comments": [], "title": "Added missing builder.endObject calls" }
{ "commits": [ { "message": "Added missing builder.endObject calls" } ], "files": [ { "diff": "@@ -52,6 +52,7 @@ public void onResponse(PendingClusterTasksResponse response) {\n XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);\n builder.startObject();\n response.toXContent(builder, request);\n+ builder.endObject();\n channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));\n } catch (Throwable e) {\n onFailure(e);", "filename": "src/main/java/org/elasticsearch/rest/action/admin/cluster/tasks/RestPendingClusterTasksAction.java", "status": "modified" }, { "diff": "@@ -172,6 +172,7 @@ public synchronized void createRiver(RiverName riverName, Map<String, Object> se\n builder.field(\"name\", clusterService.localNode().name());\n builder.field(\"transport_address\", clusterService.localNode().address().toString());\n builder.endObject();\n+ builder.endObject();\n \n client.prepareIndex(riverIndexName, riverName.name(), \"_status\")\n .setConsistencyLevel(WriteConsistencyLevel.ONE)", "filename": "src/main/java/org/elasticsearch/river/RiversService.java", "status": "modified" } ] }
{ "body": "The problem only manifests when all of the following conditions are met:\n- a custom word delimiter is used when analyzing a string field\n- the query string query is used\n- use_dis_max parameter is set to false\n- more than one field is used for the query\n- query string contains a special character (like \".\" or \"/\")\n\nThis is how to reproduce the issue:\n\n``` sh\n# create the index\ncurl -XPOST http://localhost:9200/test -d '{\n \"settings\": {\n \"index\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n },\n \"analysis\": {\n \"analyzer\": {\n \"text_ascii\": {\n \"type\": \"custom\",\n \"tokenizer\": \"whitespace\",\n \"filter\": [\"asciifolding\",\n \"lowercase\",\n \"custom_word_delimiter\"]\n }\n },\n \"filter\": {\n \"custom_word_delimiter\": {\n \"type\": \"word_delimiter\",\n \"generate_word_parts\": true,\n \"generate_number_parts\": false,\n \"catenate_numbers\": true,\n \"catenate_words\": false,\n \"split_on_case_change\": false,\n \"split_on_numerics\": false,\n \"stem_english_possessive\": false\n }\n }\n }\n },\n \"mappings\": {\n \"person\": {\n \"properties\": {\n \"name\": {\n \"type\": \"string\",\n \"analyzer\": \"text_ascii\"\n },\n \"address\": {\n \"type\": \"string\",\n \"analyzer\": \"text_ascii\"\n }\n }\n }\n }\n}\n'\n\n# index the document\ncurl -XPUT 'http://localhost:9200/test/person/1' -d '{\n \"name\" : \"bogdan mihai dumitrescu\",\n \"address\" : \"amsterdam\"\n}'\n\n# search for the document with a dot in the query \n# expect one result to be returned, but this returns 0 hits\ncurl -XPOST 'http://localhost:9200/test/_search' -d '{\n \"query\": {\n \"query_string\": {\n \"query\": \"bogdan.dumitrescu\",\n \"fields\": [\"name\", \"address\"],\n \"use_dis_max\": false,\n \"default_operator\": \"and\",\n \"analyzer\": \"text_ascii\"\n }\n }\n}'\n```\n", "comments": [], "number": 3898, "title": "No results are found with specific use case when using a custom word_delimiter filter" }
{ "body": "Closes #3898\n\nI've tracked the problem down to the MapperQueryParser class. As you can see from the commit the \"true\" value was hard coded for one of the code branches instead of using the 'quoted' parameter sent to the method.\n", "number": 3899, "review_comments": [], "title": "Replaced hardcoded boolean value with correct parameter." }
{ "commits": [ { "message": "Replaced hardcoded boolean value with correct parameter\n\nImplemented changes suggested by review.\n\nCloses #3898" } ], "files": [ { "diff": "@@ -181,7 +181,7 @@ public Query getFieldQuery(String field, String queryText, boolean quoted) throw\n } else {\n List<BooleanClause> clauses = new ArrayList<BooleanClause>();\n for (String mField : fields) {\n- Query q = getFieldQuerySingle(mField, queryText, true);\n+ Query q = getFieldQuerySingle(mField, queryText, quoted);\n if (q != null) {\n applyBoost(mField, q);\n clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));", "filename": "src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java", "status": "modified" }, { "diff": "@@ -1598,4 +1598,35 @@ public void testSimpleDFSQuery() throws ElasticSearchException, IOException {\n assertNoFailures(response);\n }\n \n+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3898\n+ public void testCustomWordDelimiterQueryString() {\n+ client().admin().indices().prepareCreate(\"test\")\n+ .setSettings(\"analysis.analyzer.my_analyzer.type\", \"custom\",\n+ \"analysis.analyzer.my_analyzer.tokenizer\", \"whitespace\",\n+ \"analysis.analyzer.my_analyzer.filter\", \"custom_word_delimiter\",\n+ \"analysis.filter.custom_word_delimiter.type\", \"word_delimiter\",\n+ \"analysis.filter.custom_word_delimiter.generate_word_parts\", \"true\",\n+ \"analysis.filter.custom_word_delimiter.generate_number_parts\", \"false\",\n+ \"analysis.filter.custom_word_delimiter.catenate_numbers\", \"true\",\n+ \"analysis.filter.custom_word_delimiter.catenate_words\", \"false\",\n+ \"analysis.filter.custom_word_delimiter.split_on_case_change\", \"false\",\n+ \"analysis.filter.custom_word_delimiter.split_on_numerics\", \"false\",\n+ \"analysis.filter.custom_word_delimiter.stem_english_possessive\", \"false\")\n+ .addMapping(\"type1\", \"field1\", \"type=string,analyzer=my_analyzer\", \"field2\", \"type=string,analyzer=my_analyzer\")\n+ .get();\n+\n+ ensureGreen();\n+\n+ client().prepareIndex(\"test\", \"type1\", \"1\").setSource(\"field1\", \"foo bar baz\", \"field2\", \"not needed\").get();\n+ refresh();\n+\n+ SearchResponse response = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ QueryBuilders.queryString(\"foo.baz\").useDisMax(false).defaultOperator(QueryStringQueryBuilder.Operator.AND)\n+ .field(\"field1\").field(\"field2\")).get();\n+\n+ assertHitCount(response, 1l);\n+ }\n+\n }", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
{ "body": "If multiple tokens are output at the same position then `match` queries are not working correctly if `AND` operator is used.\n\nFirst I noticed this issue when using Hunspell token filter (something similar has been reported in [LUCENE-5057](https://issues.apache.org/jira/browse/LUCENE-5057) but it is not really a Lucene issue). With Hunspell it is possible to get multiple output tokens from a single input token, all at the same position. However, client query usually contains only one of those tokens or token that can output different set of tokens. When using `match` query and `AND` operator the document is not matching (while it should be).\n\n_I also think that this can impact other linguistics packages (like Basis`s RBL?)_\n\nSimilar situation can be simulated using synonym filter. Imagine that we are using query time synonyms.\n\nLet's say we index simple document:\n\n```\n{ text : \"Quick brown fox\" }\n```\n\nand we define query time synonym \"quick, fast\". Now let's see what we can do with this in the following [recreation script](https://gist.github.com/lukas-vlcek/6923179) (using ES 0.90.5), output commented below:\n\n```\n#!/bin/sh\n\necho \"Elasticsearch version\"\ncurl localhost:9200; echo; echo;\n\necho \"Delete index\"; curl -X DELETE 'localhost:9200/i'; echo; echo;\n\necho \"Create index with analysis and mappings\"; curl -X PUT 'localhost:9200/i' -d '{\n \"settings\" : {\n \"analysis\" : {\n \"analyzer\" : {\n \"index\" : {\n \"type\" : \"custom\",\n \"tokenizer\" : \"standard\",\n \"filter\" : [\"lowercase\"]\n },\n \"search\" : {\n \"type\" : \"custom\",\n \"tokenizer\" : \"standard\",\n \"filter\" : [\"lowercase\",\"synonym\"]\n }\n },\n \"filter\" : {\n \"synonym\" : {\n \"type\" : \"synonym\",\n \"synonyms\" : [\n \"fast, quick\"\n ]\n }}},\n \"mappings\" : {\n \"t\" : {\n \"properties\" : {\n \"text\" : {\n \"type\" : \"string\",\n \"index_analyzer\" : \"index\",\n \"search_analyzer\" : \"search\"\n}}}}}}'; echo; echo;\n\n# Wait for all the index shards to be allocated\ncurl -s -X GET 'http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=5s' > /dev/null\n\necho \"Test synonyms for 'fast': should output two tokens\"; curl -X POST 'localhost:9200/i/_analyze?analyzer=search&format=text&text=fast'; echo; echo;\n\necho \"Index data: 'Quick brown fox'\"; curl -X POST 'localhost:9200/i/t' -d '{\n \"text\" : \"Quick brown fox\"\n}'; echo; echo;\n\necho \"Refresh Lucene reader\"; curl -X POST 'localhost:9200/i/_refresh'; echo; echo;\n\necho \"Testing search\";\necho ===========================\necho \"1) query_string: quick\";\ncurl -X GET 'localhost:9200/_search' -d '{\"query\":{\"query_string\":{\"query\":\"quick\",\"default_field\":\"text\"}}}'; echo; echo;\n\necho \"2) query_string: fast - is search_analyzer used?\";\ncurl -X GET 'localhost:9200/_search' -d '{\"query\":{\"query_string\":{\"query\":\"fast\",\"default_field\":\"text\"}}}'; echo; echo;\n\necho \"2.5) query_string: fast - forcing search_analyzer\";\ncurl -X GET 'localhost:9200/_search' -d '{\"query\":{\"query_string\":{\"query\":\"fast\",\"default_field\":\"text\",\"analyzer\":\"search\"}}}'; echo; echo;\n\necho \"3) query_string: fast - forcing search_analyzer, forcing AND operator\";\ncurl -X GET 'localhost:9200/_search' -d '{\"query\":{\"query_string\":{\"query\":\"fast\",\"default_field\":\"text\",\"analyzer\":\"search\",\"default_operator\":\"AND\"}}}'; echo; echo;\n\necho \"4) match query: quick\";\ncurl -X GET 'localhost:9200/_search' -d 
'{\"query\":{\"match\":{\"text\":{\"query\":\"quick\",\"analyzer\":\"search\"}}}}'; echo; echo;\n\necho \"5) match query: fast\";\ncurl -X GET 'localhost:9200/_search' -d '{\"query\":{\"match\":{\"text\":{\"query\":\"fast\",\"analyzer\":\"search\"}}}}'; echo; echo;\n\necho \"6) match query: fast - forcing AND operator\";\ncurl -X GET 'localhost:9200/_search' -d '{\"query\":{\"match\":{\"text\":{\"query\":\"fast\",\"analyzer\":\"search\",\"operator\":\"AND\"}}}}'; echo; echo;\n```\n\nOutput of queries:\n\n```\n1) query_string: quick\n{\"took\":4,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":1,\"max_score\":0.15342641,\"hits\":[{\"_index\":\"i\",\"_type\":\"t\",\"_id\":\"0N2FX_vxR5qsMTYczFPl1w\",\"_score\":0.15342641, \"_source\" : {\n \"text\" : \"Quick brown fox\"\n}}]}}\n\n2) query_string: fast - is search_analyzer used?\n{\"took\":2,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":0,\"max_score\":null,\"hits\":[]}}\n\n2.5) query_string: fast - forcing search_analyzer\n{\"took\":3,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":1,\"max_score\":0.04500804,\"hits\":[{\"_index\":\"i\",\"_type\":\"t\",\"_id\":\"0N2FX_vxR5qsMTYczFPl1w\",\"_score\":0.04500804, \"_source\" : {\n \"text\" : \"Quick brown fox\"\n}}]}}\n\n3) query_string: fast - forcing search_analyzer, forcing AND operator\n{\"took\":2,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":1,\"max_score\":0.04500804,\"hits\":[{\"_index\":\"i\",\"_type\":\"t\",\"_id\":\"0N2FX_vxR5qsMTYczFPl1w\",\"_score\":0.04500804, \"_source\" : {\n \"text\" : \"Quick brown fox\"\n}}]}}\n\n4) match query: quick\n{\"took\":2,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":1,\"max_score\":0.04500804,\"hits\":[{\"_index\":\"i\",\"_type\":\"t\",\"_id\":\"0N2FX_vxR5qsMTYczFPl1w\",\"_score\":0.04500804, \"_source\" : {\n \"text\" : \"Quick brown fox\"\n}}]}}\n\n5) match query: fast\n{\"took\":3,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":1,\"max_score\":0.04500804,\"hits\":[{\"_index\":\"i\",\"_type\":\"t\",\"_id\":\"0N2FX_vxR5qsMTYczFPl1w\",\"_score\":0.04500804, \"_source\" : {\n \"text\" : \"Quick brown fox\"\n}}]}}\n\n6) match query: fast - forcing AND operator\n{\"took\":4,\"timed_out\":false,\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},\"hits\":{\"total\":0,\"max_score\":null,\"hits\":[]}}\n```\n\nMy comments on results:\n\n_(note that comment no.2 may contain question regarding other non related issue)_\n\n1) `query_string` for query \"quick\" works as expected.\n\n2) `query_string` for query \"fast\" does not seem to work. According to the [documentation](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-core-types.html#string) I was expecting that `search_analyzer` defined in `string` type mapping would be used. But anyway, this should not be the topic of this issue... :smile:\n\n2.5) `query_string` for query \"fast\" works (if I explicitly force `search` analyzer) so we can say query time synonym works fine.\n\n3) The same situation as in 2.5) except we are forcing `AND` operator. It should work and it is working.\n\n4) Now, let's use `match` query and query for \"quick\". It works fine.\n\n5) Again, `match` query but query for \"fast\". It works, so far so good.\n\n6) The same as in 5) except we are forcing `AND` operator. 
It should work (I hope) but it is not.\n\nIf I could speculate about why this is happening:\n\na) MatchQueryParser does something like:\n\n```\n... if (\"and\".equalsIgnoreCase(op)) {\n matchQuery.setOccur(BooleanClause.Occur.MUST);\n} ...\n```\n\nb) and MatchQuery does not take account on the position of tokens. It simply stacks all incoming tokens into BooleanQuery. It contains patterns similar to the following excerpt:\n\n```\nBooleanQuery q = new BooleanQuery(positionCount == 1);\nfor (int i = 0; i < numTokens; i++) {\n boolean hasNext = buffer.incrementToken();\n assert hasNext == true;\n final Query currentQuery = newTermQuery(mapper, new Term(field, termToByteRef(termAtt)));\n q.add(currentQuery, occur);\n}\n```\n\nThe position of tokens is not taken into account which would explain why this is not working as expected in combination with `AND` operator in situations described above.\nI think if incoming tokens share the same position it should generate Boolean subquery with `OR` operator (?).\n", "comments": [ { "body": "> I think if incoming tokens share the same position it should generate Boolean subquery with OR operator (?).\n\nI agree!\n", "created_at": "2013-10-11T13:00:36Z" }, { "body": "btw @s1monw I do not want to hijack this issue but what do you think about my comment no.2 (to me it seems that the search analyzer is not used while it should be, no?) Is it worth opening a new issue or I am misunderstanding something here?\n", "created_at": "2013-10-11T13:43:13Z" }, { "body": "I updated the PR with a test for your issue no. 2 but I can't reproduce it though. Works just fine and uses the right filter or do I miss something?\n", "created_at": "2013-10-11T15:02:58Z" }, { "body": "If my recreation script returns one hit for the second query to you then this means it has been probably fixed already (or hard to say ... ). Just ignore it...\nThanks!\n", "created_at": "2013-10-11T15:19:13Z" }, { "body": "I will try to recreate it via REST maybe there is some problem there. I don't think I will get to it today so I will update it later!\n", "created_at": "2013-10-11T15:24:43Z" } ], "number": 3881, "title": "Multiple tokens at the same position not working correctly with match query if AND operator is used" }
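Case 6 from the recreation script can also be driven through the Java API. The sketch below is illustrative only (hypothetical class name, `client` assumed connected, index "i" and analyzers taken from the script above):

``` java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class StackedTokenMatchRepro {
    // The "search" analyzer expands "fast" into the stacked tokens [fast, quick].
    static SearchResponse caseSix(Client client) {
        return client.prepareSearch("i")
                .setQuery(QueryBuilders.matchQuery("text", "fast")
                        .analyzer("search")                         // query-time synonym expansion
                        .operator(MatchQueryBuilder.Operator.AND))  // every emitted token becomes a MUST clause
                .get(); // expected: 1 hit for "Quick brown fox"; returns 0 before the fix below
    }
}
```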
{ "body": "SynonymFilters produce token streams with stacked tokens such that\nconjunction queries need to be parsed in a special way: the\nstacked tokens are added as an inner disjunction.\n\nCloses #3881\n", "number": 3897, "review_comments": [ { "body": "This change looks hackish and fixing CommonTermsQuery to handle several terms at the same position looks hard to me (eg. if one of the terms has a high freq and another one has a low freq) so maybe we should just leave CommonTermsQuery as-is and state in the doc that when commonTermsCutoff is set, tokens at the same position won't be handled gracefully?\n", "created_at": "2013-10-14T09:31:31Z" }, { "body": "yeah it's hard though. I agree we should just leave it as it is.\n", "created_at": "2013-10-14T09:55:29Z" }, { "body": "s/The if/If/\n", "created_at": "2013-10-14T11:49:09Z" }, { "body": ";) \n", "created_at": "2013-10-14T11:50:02Z" } ], "title": "Add match query support for stacked tokens" }
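To make the effect of the parser change concrete: for a query like `fast brown` with operator `and`, where the synonym filter stacks `quick` on the same position as `fast`, the match query is now built as `+(fast quick) +brown` instead of `+fast +quick +brown`. The Lucene sketch below only illustrates the resulting query shape; it is not the parser's actual code path, and the class name is hypothetical.

``` java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

public class StackedTokenQueryShape {
    // Roughly what the match query produces for "fast brown" with operator AND
    // once tokens sharing a position are grouped into an inner disjunction.
    static BooleanQuery build() {
        BooleanQuery samePosition = new BooleanQuery(true); // inner disjunction for one position, coord disabled
        samePosition.add(new TermQuery(new Term("text", "fast")), BooleanClause.Occur.SHOULD);
        samePosition.add(new TermQuery(new Term("text", "quick")), BooleanClause.Occur.SHOULD);

        BooleanQuery conjunction = new BooleanQuery(false);
        conjunction.add(samePosition, BooleanClause.Occur.MUST);                             // position 1
        conjunction.add(new TermQuery(new Term("text", "brown")), BooleanClause.Occur.MUST); // position 2
        return conjunction;
    }
}
```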
{ "commits": [ { "message": "Add match query support for stacked tokens\n\nSynonymFilters produces token streams with stacked tokens such that\nconjunction queries need to be parsed in a special way such that the\nstacked tokens are added as an innner disjuncition.\n\nCloses #3881" }, { "message": "[DOCS] Note that cutoff_frequency doesn't handle stacked tokens gracefully" } ], "files": [ { "diff": "@@ -97,6 +97,13 @@ The `cutoff_frequency` can either be relative to the number of documents\n in the index if in the range `[0..1)` or absolute if greater or equal to\n `1.0`.\n \n+Note: If the `cutoff_frequency` is used and the operator is `and`\n+_stacked tokens_ (tokens that are on the same position like `synonym` filter emits)\n+are not handled gracefully as they are in a pure `and` query. For instance the query\n+`fast fox` is analyzed into 3 terms `[fast, quick, fox]` where `quick` is a synonym\n+for `fast` on the same token positions the query might require `fast` and `quick` to \n+match if the operator is `and`. \n+\n Here is an example showing a query composed of stopwords exclusivly:\n \n [source,js]", "filename": "docs/reference/query-dsl/queries/match-query.asciidoc", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.apache.lucene.index.Term;\n import org.apache.lucene.queries.ExtendedCommonTermsQuery;\n import org.apache.lucene.search.*;\n+import org.apache.lucene.search.BooleanClause.Occur;\n import org.apache.lucene.util.BytesRef;\n import org.apache.lucene.util.UnicodeUtil;\n import org.elasticsearch.ElasticSearchIllegalArgumentException;\n@@ -241,6 +242,28 @@ public Query parse(Type type, String fieldName, Object value) throws IOException\n q.add(new Term(field, termToByteRef(termAtt)));\n }\n return wrapSmartNameQuery(q, smartNameFieldMappers, parseContext);\n+ } if (severalTokensAtSamePosition && occur == Occur.MUST) {\n+ BooleanQuery q = new BooleanQuery(positionCount == 1);\n+ Query currentQuery = null;\n+ for (int i = 0; i < numTokens; i++) {\n+ boolean hasNext = buffer.incrementToken();\n+ assert hasNext == true;\n+ if (posIncrAtt != null && posIncrAtt.getPositionIncrement() == 0) {\n+ if (!(currentQuery instanceof BooleanQuery)) {\n+ Query t = currentQuery;\n+ currentQuery = new BooleanQuery(true);\n+ ((BooleanQuery)currentQuery).add(t, BooleanClause.Occur.SHOULD);\n+ }\n+ ((BooleanQuery)currentQuery).add(newTermQuery(mapper, new Term(field, termToByteRef(termAtt))), BooleanClause.Occur.SHOULD);\n+ } else {\n+ if (currentQuery != null) {\n+ q.add(currentQuery, occur);\n+ }\n+ currentQuery = newTermQuery(mapper, new Term(field, termToByteRef(termAtt)));\n+ }\n+ }\n+ q.add(currentQuery, occur);\n+ return wrapSmartNameQuery(q, smartNameFieldMappers, parseContext);\n } else {\n BooleanQuery q = new BooleanQuery(positionCount == 1);\n for (int i = 0; i < numTokens; i++) {", "filename": "src/main/java/org/elasticsearch/index/search/MatchQuery.java", "status": "modified" }, { "diff": "@@ -40,9 +40,7 @@\n import static org.elasticsearch.index.query.FilterBuilders.*;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;\n-import static org.hamcrest.Matchers.allOf;\n-import static org.hamcrest.Matchers.containsString;\n-import static org.hamcrest.Matchers.equalTo;\n+import static org.hamcrest.Matchers.*;\n \n /**\n *", "filename": "src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java", "status": "modified" }, { "diff": "@@ -21,11 +21,13 @@\n \n import 
org.apache.lucene.util.English;\n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.search.SearchType;\n import org.elasticsearch.common.settings.ImmutableSettings;\n+import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.query.*;\n import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;\n@@ -44,6 +46,9 @@\n import java.util.Random;\n import java.util.concurrent.ExecutionException;\n \n+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;\n+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;\n+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.index.query.FilterBuilders.*;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n@@ -1617,4 +1622,145 @@ public void testMultiFieldQueryString() {\n assertHitCount(client().prepareSearch(\"test\").setQuery(queryString(\"/value[01]/\").field(\"field1\").field(\"field2\")).get(), 1);\n assertHitCount(client().prepareSearch(\"test\").setQuery(queryString(\"field\\\\*:/value[01]/\")).get(), 1);\n }\n+ \n+ // see #3881 - for extensive description of the issue\n+ @Test\n+ public void testMatchQueryWithSynonyms() throws IOException {\n+ CreateIndexRequestBuilder builder = prepareCreate(\"test\").setSettings(settingsBuilder()\n+ .put(SETTING_NUMBER_OF_SHARDS, 1)\n+ .put(SETTING_NUMBER_OF_REPLICAS, 0)\n+ .put(\"index.analysis.analyzer.index.type\", \"custom\")\n+ .put(\"index.analysis.analyzer.index.tokenizer\", \"standard\")\n+ .put(\"index.analysis.analyzer.index.filter\", \"lowercase\")\n+ .put(\"index.analysis.analyzer.search.type\", \"custom\")\n+ .put(\"index.analysis.analyzer.search.tokenizer\", \"standard\")\n+\n+ .putArray(\"index.analysis.analyzer.search.filter\", \"lowercase\", \"synonym\")\n+ .put(\"index.analysis.filter.synonym.type\", \"synonym\")\n+ .putArray(\"index.analysis.filter.synonym.synonyms\", \"fast, quick\"));\n+ \n+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject(\"test\")\n+ .startObject(\"properties\")\n+ .startObject(\"text\")\n+ .field(\"type\", \"string\")\n+ .field(\"index_analyzer\", \"index\")\n+ .field(\"search_analyzer\", \"search\")\n+ .endObject()\n+ .endObject()\n+ .endObject().endObject();\n+ assertAcked(builder.addMapping(\"test\", mapping));\n+ ensureGreen();\n+ client().prepareIndex(\"test\", \"test\", \"1\").setSource(jsonBuilder().startObject()\n+ .field(\"text\", \"quick brown fox\")\n+ .endObject())\n+ .execute().actionGet();\n+ client().admin().indices().prepareRefresh().execute().actionGet();\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.matchQuery(\"text\", \"quick\").operator(MatchQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 1);\n+ searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.matchQuery(\"text\", \"quick brown\").operator(MatchQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 1);\n+ searchResponse = 
client().prepareSearch(\"test\").setQuery(QueryBuilders.matchQuery(\"text\", \"fast\").operator(MatchQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 1);\n+ \n+ client().prepareIndex(\"test\", \"test\", \"2\").setSource(jsonBuilder().startObject()\n+ .field(\"text\", \"fast brown fox\")\n+ .endObject())\n+ .execute().actionGet();\n+ client().admin().indices().prepareRefresh().execute().actionGet();\n+ searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.matchQuery(\"text\", \"quick\").operator(MatchQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 2);\n+ searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.matchQuery(\"text\", \"quick brown\").operator(MatchQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 2);\n+ }\n+ \n+ @Test\n+ public void testMatchQueryWithStackedStems() throws IOException {\n+ CreateIndexRequestBuilder builder = prepareCreate(\"test\").setSettings(settingsBuilder()\n+ .put(SETTING_NUMBER_OF_SHARDS, 1)\n+ .put(SETTING_NUMBER_OF_REPLICAS, 0)\n+ .put(\"index.analysis.analyzer.index.type\", \"custom\")\n+ .put(\"index.analysis.analyzer.index.tokenizer\", \"standard\")\n+ .put(\"index.analysis.analyzer.index.filter\", \"lowercase\")\n+ .put(\"index.analysis.analyzer.search.type\", \"custom\")\n+ .put(\"index.analysis.analyzer.search.tokenizer\", \"standard\")\n+ .putArray(\"index.analysis.analyzer.search.filter\", \"lowercase\", \"keyword_repeat\", \"porterStem\", \"unique_stem\")\n+ .put(\"index.analysis.filter.unique_stem.type\", \"unique\")\n+ .put(\"index.analysis.filter.unique_stem.only_on_same_position\", true));\n+ \n+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject(\"test\")\n+ .startObject(\"properties\")\n+ .startObject(\"text\")\n+ .field(\"type\", \"string\")\n+ .field(\"index_analyzer\", \"index\")\n+ .field(\"search_analyzer\", \"search\")\n+ .endObject()\n+ .endObject()\n+ .endObject().endObject();\n+ assertAcked(builder.addMapping(\"test\", mapping));\n+ ensureGreen();\n+ client().prepareIndex(\"test\", \"test\", \"1\").setSource(jsonBuilder().startObject()\n+ .field(\"text\", \"the fox runs across the street\")\n+ .endObject())\n+ .execute().actionGet();\n+ client().admin().indices().prepareRefresh().execute().actionGet();\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.matchQuery(\"text\", \"fox runs\").operator(MatchQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 1);\n+ \n+ client().prepareIndex(\"test\", \"test\", \"2\").setSource(jsonBuilder().startObject()\n+ .field(\"text\", \"run fox run\")\n+ .endObject())\n+ .execute().actionGet();\n+ client().admin().indices().prepareRefresh().execute().actionGet();\n+ searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.matchQuery(\"text\", \"fox runs\").operator(MatchQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 2);\n+ }\n+ \n+ @Test\n+ public void testQueryStringWithSynonyms() throws IOException {\n+ CreateIndexRequestBuilder builder = prepareCreate(\"test\").setSettings(settingsBuilder()\n+ .put(SETTING_NUMBER_OF_SHARDS, 1)\n+ .put(SETTING_NUMBER_OF_REPLICAS, 0)\n+ .put(\"index.analysis.analyzer.index.type\", \"custom\")\n+ .put(\"index.analysis.analyzer.index.tokenizer\", \"standard\")\n+ .put(\"index.analysis.analyzer.index.filter\", \"lowercase\")\n+ .put(\"index.analysis.analyzer.search.type\", \"custom\")\n+ .put(\"index.analysis.analyzer.search.tokenizer\", 
\"standard\")\n+\n+ .putArray(\"index.analysis.analyzer.search.filter\", \"lowercase\", \"synonym\")\n+ .put(\"index.analysis.filter.synonym.type\", \"synonym\")\n+ .putArray(\"index.analysis.filter.synonym.synonyms\", \"fast, quick\"));\n+ \n+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject(\"test\")\n+ .startObject(\"properties\")\n+ .startObject(\"text\")\n+ .field(\"type\", \"string\")\n+ .field(\"index_analyzer\", \"index\")\n+ .field(\"search_analyzer\", \"search\")\n+ .endObject()\n+ .endObject()\n+ .endObject().endObject();\n+ assertAcked(builder.addMapping(\"test\", mapping));\n+ ensureGreen();\n+ client().prepareIndex(\"test\", \"test\", \"1\").setSource(jsonBuilder().startObject()\n+ .field(\"text\", \"quick brown fox\")\n+ .endObject())\n+ .execute().actionGet();\n+ client().admin().indices().prepareRefresh().execute().actionGet();\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"quick\").defaultField(\"text\").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 1);\n+ searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"quick brown\").defaultField(\"text\").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 1);\n+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryString(\"fast\").defaultField(\"text\").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 1);\n+ \n+ client().prepareIndex(\"test\", \"test\", \"2\").setSource(jsonBuilder().startObject()\n+ .field(\"text\", \"fast brown fox\")\n+ .endObject())\n+ .execute().actionGet();\n+ client().admin().indices().prepareRefresh().execute().actionGet();\n+ searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"quick\").defaultField(\"text\").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 2);\n+ searchResponse = client().prepareSearch(\"test\").setQuery(QueryBuilders.queryString(\"quick brown\").defaultField(\"text\").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();\n+ assertHitCount(searchResponse, 2);\n+ }\n }", "filename": "src/test/java/org/elasticsearch/search/query/SimpleQueryTests.java", "status": "modified" } ] }
{ "body": "For \"linear\", \"gauss\", \"exp\" ... functions, the \"origin\" parameter should allow date math expressions. \"now\" as origin fails to parse, as does \"now-7d\". Obviously offset can replace part of the date math, but it would still be useful to be able to set the origin that way.\n", "comments": [ { "body": "good point, thanks for opening this!\n", "created_at": "2013-10-11T12:43:18Z" }, { "body": "thanks @jaysonminard for bringing this up!\n", "created_at": "2013-10-11T15:28:20Z" } ], "number": 3892, "title": "function_score (FunctionScoreQuery) decay functions do not allow date math" }
{ "body": "The parser used the method intended for parsing on\nthe indexing side, which never supported date math.\n\nCloses #3892\n", "number": 3895, "review_comments": [], "title": "Support date math for `origin` decay function parsing" }
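With this change, a decay function's `origin` accepts the same date math as range queries. Below is a minimal sketch of the Java-API usage, mirroring the test added in this PR; the class name and `client` are hypothetical, and the static imports are assumed to be the same ones the PR's test uses.

``` java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction;

public class DateMathOriginExample {
    // Scores matching documents by how close the date field "num1" is to one day ago.
    static SearchResponse recentFirst(Client client) {
        return client.prepareSearch("test")
                .setQuery(functionScoreQuery(
                        termQuery("test", "value"),
                        gaussDecayFunction("num1", "now-1d", "2d"))) // origin may now be a date math expression
                .get();
    }
}
```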
{ "commits": [ { "message": "Support date math for `origin` decay function parsing\n\nThe parser used the method that was supposed to be used for parsing on\nthe indexing side that never supported date math.\n\nCloses #3892" } ], "files": [ { "diff": "@@ -294,44 +294,48 @@ public Query fuzzyQuery(String value, String minSim, int prefixLength, int maxEx\n \n @Override\n public Query termQuery(Object value, @Nullable QueryParseContext context) {\n- long now = context == null ? System.currentTimeMillis() : context.nowInMillis();\n- long lValue = dateMathParser.parse(convertToString(value), now);\n+ long lValue = parseToMilliseconds(value, context);\n return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,\n lValue, lValue, true, true);\n }\n+ \n+ public long parseToMilliseconds(Object value, @Nullable QueryParseContext context) {\n+ return parseToMilliseconds(value, context, false);\n+ }\n+ \n+ public long parseToMilliseconds(Object value, @Nullable QueryParseContext context, boolean includeUpper) {\n+ long now = context == null ? System.currentTimeMillis() : context.nowInMillis();\n+ return includeUpper ? dateMathParser.parseUpperInclusive(convertToString(value), now) : dateMathParser.parse(convertToString(value), now);\n+ }\n \n @Override\n public Filter termFilter(Object value, @Nullable QueryParseContext context) {\n- long now = context == null ? System.currentTimeMillis() : context.nowInMillis();\n- long lValue = dateMathParser.parse(convertToString(value), now);\n+ final long lValue = parseToMilliseconds(value, context);\n return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,\n lValue, lValue, true, true);\n }\n \n @Override\n public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {\n- long now = context == null ? System.currentTimeMillis() : context.nowInMillis();\n return NumericRangeQuery.newLongRange(names.indexName(), precisionStep,\n- lowerTerm == null ? null : dateMathParser.parse(convertToString(lowerTerm), now),\n- upperTerm == null ? null : (includeUpper && parseUpperInclusive) ? dateMathParser.parseUpperInclusive(convertToString(upperTerm), now) : dateMathParser.parse(convertToString(upperTerm), now),\n+ lowerTerm == null ? null : parseToMilliseconds(lowerTerm, context),\n+ upperTerm == null ? null : parseToMilliseconds(upperTerm, context, includeUpper && parseUpperInclusive),\n includeLower, includeUpper);\n }\n \n @Override\n public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {\n- long now = context == null ? System.currentTimeMillis() : context.nowInMillis();\n return NumericRangeFilter.newLongRange(names.indexName(), precisionStep,\n- lowerTerm == null ? null : dateMathParser.parse(convertToString(lowerTerm), now),\n- upperTerm == null ? null : (includeUpper && parseUpperInclusive) ? dateMathParser.parseUpperInclusive(convertToString(upperTerm), now) : dateMathParser.parse(convertToString(upperTerm), now),\n+ lowerTerm == null ? null : parseToMilliseconds(lowerTerm, context),\n+ upperTerm == null ? null : parseToMilliseconds(upperTerm, context, includeUpper && parseUpperInclusive),\n includeLower, includeUpper);\n }\n \n @Override\n public Filter rangeFilter(IndexFieldDataService fieldData, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {\n- long now = context == null ? 
System.currentTimeMillis() : context.nowInMillis();\n return NumericRangeFieldDataFilter.newLongRange((IndexNumericFieldData<?>) fieldData.getForField(this),\n- lowerTerm == null ? null : dateMathParser.parse(convertToString(lowerTerm), now),\n- upperTerm == null ? null : (includeUpper && parseUpperInclusive) ? dateMathParser.parseUpperInclusive(convertToString(upperTerm), now) : dateMathParser.parse(convertToString(upperTerm), now),\n+ lowerTerm == null ? null : parseToMilliseconds(lowerTerm, context),\n+ upperTerm == null ? null : parseToMilliseconds(upperTerm, context, includeUpper && parseUpperInclusive),\n includeLower, includeUpper);\n }\n ", "filename": "src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java", "status": "modified" }, { "diff": "@@ -250,11 +250,11 @@ private ScoreFunction parseDateVariable(String fieldName, XContentParser parser,\n }\n long origin = SearchContext.current().nowInMillis();\n if (originString != null) {\n- origin = dateFieldMapper.value(originString).longValue();\n+ origin = dateFieldMapper.parseToMilliseconds(originString, parseContext);\n }\n \n if (scaleString == null) {\n- throw new ElasticSearchParseException(DecayFunctionBuilder.SCALE + \"must be set for date fields.\");\n+ throw new ElasticSearchParseException(DecayFunctionBuilder.SCALE + \" must be set for date fields.\");\n }\n TimeValue val = TimeValue.parseTimeValue(scaleString, TimeValue.timeValueHours(24));\n double scale = val.getMillis();", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java", "status": "modified" }, { "diff": "@@ -33,7 +33,6 @@\n import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;\n import org.elasticsearch.search.SearchHits;\n import org.elasticsearch.test.AbstractIntegrationTest;\n-import org.elasticsearch.test.hamcrest.ElasticSearchAssertions;\n import org.joda.time.DateTime;\n import org.junit.Test;\n \n@@ -47,7 +46,7 @@\n import static org.elasticsearch.index.query.QueryBuilders.termQuery;\n import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.*;\n import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;\n-import static org.elasticsearch.test.hamcrest.ElasticSearchAssertions.assertAcked;\n+import static org.elasticsearch.test.hamcrest.ElasticSearchAssertions.*;\n import static org.hamcrest.Matchers.*;\n \n public class DecayFunctionScoreTests extends AbstractIntegrationTest {\n@@ -450,14 +449,44 @@ public void testExceptionThrownIfScaleLE0() throws Exception {\n functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num1\", \"2013-05-28\", \"-1d\")))));\n \n SearchResponse sr = response.actionGet();\n- ElasticSearchAssertions.assertNoFailures(sr);\n- SearchHits sh = sr.getHits();\n- assertThat(sh.hits().length, equalTo(2));\n- assertThat(sh.getAt(0).getId(), equalTo(\"2\"));\n- assertThat(sh.getAt(1).getId(), equalTo(\"1\"));\n+ assertOrderedSearchHits(sr, \"2\", \"1\");\n+ }\n+ \n+ @Test\n+ public void testParseDateMath() throws Exception {\n+ \n+ assertAcked(prepareCreate(\"test\").addMapping(\n+ \"type1\",\n+ jsonBuilder().startObject().startObject(\"type1\").startObject(\"properties\").startObject(\"test\").field(\"type\", \"string\")\n+ .endObject().startObject(\"num1\").field(\"type\", \"date\").endObject().endObject().endObject().endObject()));\n+ ensureYellow();\n+ client().index(\n+ indexRequest(\"test\").type(\"type1\").id(\"1\")\n+ .source(jsonBuilder().startObject().field(\"test\", 
\"value\").field(\"num1\", System.currentTimeMillis()).endObject())).actionGet();\n+ client().index(\n+ indexRequest(\"test\").type(\"type1\").id(\"2\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value\").field(\"num1\", System.currentTimeMillis() - (1000 * 60 * 60 * 24)).endObject())).actionGet();\n+ refresh();\n+\n+ SearchResponse sr = client().search(\n+ searchRequest().source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num1\", \"now\", \"2d\"))))).get();\n+\n+ assertNoFailures(sr);\n+ assertOrderedSearchHits(sr, \"1\", \"2\");\n+ \n+ sr = client().search(\n+ searchRequest().source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num1\", \"now-1d\", \"2d\"))))).get();\n+\n+ assertNoFailures(sr);\n+ assertOrderedSearchHits(sr, \"2\", \"1\");\n \n }\n \n+\n @Test(expected = ElasticSearchIllegalStateException.class)\n public void testExceptionThrownIfScaleRefNotBetween0And1() throws Exception {\n DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder(\"num1\", \"2013-05-28\", \"1d\").setDecay(100);\n@@ -501,7 +530,7 @@ public void testValueMissingLin() throws Exception {\n .add(linearDecayFunction(\"num2\", \"0.0\", \"1\")).scoreMode(\"multiply\"))));\n \n SearchResponse sr = response.actionGet();\n- ElasticSearchAssertions.assertNoFailures(sr);\n+ assertNoFailures(sr);\n SearchHits sh = sr.getHits();\n assertThat(sh.hits().length, equalTo(4));\n double[] scores = new double[4];\n@@ -549,7 +578,7 @@ public void testDateWithoutOrigin() throws Exception {\n .scoreMode(\"multiply\"))));\n \n SearchResponse sr = response.actionGet();\n- ElasticSearchAssertions.assertNoFailures(sr);\n+ assertNoFailures(sr);\n SearchHits sh = sr.getHits();\n assertThat(sh.hits().length, equalTo(3));\n double[] scores = new double[4];\n@@ -602,7 +631,7 @@ public void testManyDocsLin() throws Exception {\n .scoreMode(\"multiply\").boostMode(CombineFunction.REPLACE.getName()))));\n \n SearchResponse sr = response.actionGet();\n- ElasticSearchAssertions.assertNoFailures(sr);\n+ assertNoFailures(sr);\n SearchHits sh = sr.getHits();\n assertThat(sh.hits().length, equalTo(numDocs));\n double[] scores = new double[numDocs];", "filename": "src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java", "status": "modified" } ] }
{ "body": "Trying to use the new `function_score` query in 0.90.5 I stumbled across some errors. In a couple of cases the code does not work as documented.\n## boost_mode `multiply`\n\nUsing `\"boost_mode\": \"multiply\"` fails:\n\n```\n{\n \"query\": {\n \"function_score\": {\n \"boost_mode\": \"multiply\",\n \"query\": {\n \"match\": {\n \"title\": \"elasticsearch\"\n }\n },\n \"functions\": [\n {\n \"gauss\": {\n \"date\": {\n \"scale\": \"4w\"\n }\n }\n }\n ]\n }\n }\n}\n```\n\nresult:\n\n```\nQueryParsingException[[psb-1.1] function_score illegal boost_mode [multiply]]\n```\n\n`\"boost_mode\": \"mult\"` works. However, `score_mode` just behaves the other way round:\n\n```\nQueryParsingException[[psb-1.1] function_score illegal score_mode [mult]];\n```\n## `boost` as decay_function\n\nBoth the blog post introducing the feature and the documentation propose a query along the lines of\n\n```\n\"query\": {\n \"function_score\": {\n \"query\": {\n \"match\": { \"title\": \"elasticsearch\"}\n },\n \"functions\": [\n { \"boost\": 1 },\n {\n \"gauss\": {\n \"timestamp\": {\n \"scale\": \"4w\"\n }\n }\n }\n ],\n \"score_mode\": \"sum\"\n }\n}\n```\n\nrespectively\n\n```\n\"function_score\": {\n \"functions\": [\n {\n \"boost\": \"3\",\n \"filter\": {...}\n },\n {\n \"filter\": {...},\n \"script_score\": {\n \"params\": {\n \"param1\": 2,\n \"param2\": 3.1\n },\n \"script\": \"_score * doc['my_numeric_field'].value / pow(param1, param2)\"\n }\n }\n ],\n \"query\": {...},\n \"score_mode\": \"first\"\n}\n```\n\nThe second sample is intended to provide a replacement for the `custom_filters_score_query`. However, the first example fails against 0.90.5 with the following trace:\n\n```\nParse Failure [Failed to parse source [{ \"query\": { \"function_score\": { \"query\": { \"match\": { \"title\": \"elasticsearch\" } }, \"functions\": [ { \"boost\": 1 }, { \"gauss\": { \"timestamp\": { \"scale\": \"4w\" } } } ], \"score_mode\": \"sum\" } } }]]]; nested: QueryParsingException[[psb-1.1] No function with the name [boost] is registered.]; }\n```\n\nI failed to figure out what's the right incantation to invoke `boost`.\n", "comments": [ { "body": "\"boost_mode\": \"mult\" is indeed wrong, it was supposed to be working with \"boost_mode\": \"multiply\". I will fix this.\n\nAs for the boost function, the documentation is wrong. The keyword is \"boost_factor\" instead of \"boost\". I will update the documentation accordingly.\n\nThanks a lot for reporting this! \n", "created_at": "2013-10-10T11:31:18Z" }, { "body": "If `multiply` is the right keyword to use with boost_mode, the first two samples in the doc are false as well since they still mention `mult`:\n\n```\n\"function_score\": {\n \"(query|filter)\": {},\n \"boost\": \"boost for the whole query\",\n \"FUNCTION\": {},\n \"boost_mode\":\"(mult|replace|...)\"\n}\n```\n\nand\n\n```\n\"function_score\": {\n \"(query|filter)\": {},\n \"boost\": \"boost for the whole query\",\n \"functions\": [\n {\n \"filter\": {},\n \"FUNCTION\": {}\n },\n {\n \"FUNCTION\": {}\n }\n ],\n \"max_boost\": number,\n \"score_mode\": \"(mult|max|...)\",\n \"boost_mode\": \"(mult|replace|...)\"\n}\n```\n\nThanks for the quick response.\n", "created_at": "2013-10-10T15:45:11Z" } ], "number": 3872, "title": "inconsistent behavior and documentation(?) errors with function_score" }
{ "body": "```\n- \"boost\" should be \"boost_factor\"\n- \"mult\" should be \"multiply\"\n```\n\ncloses #3872 for master\n", "number": 3891, "review_comments": [], "title": "fix naming in function_score" }
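For completeness, the accepted keywords map onto the Java builder as well. The sketch below assumes that `scoreMode`/`boostMode` on the function score builder take the same string names the parser accepts, as the existing tests suggest; the query itself is just the example from the issue, and the class name is hypothetical.

``` java
import org.elasticsearch.index.query.QueryBuilder;

import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction;

public class BoostModeNamingExample {
    static QueryBuilder build() {
        return functionScoreQuery(matchQuery("title", "elasticsearch"))
                .add(gaussDecayFunction("date", "2013-05-28", "4w")) // field, origin, scale
                .scoreMode("multiply")   // score_mode never accepted "mult"
                .boostMode("multiply");  // after this fix, boost_mode accepts "multiply" instead of "mult"
    }
}
```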
{ "commits": [ { "message": "fix naming in function_score\n\n - \"boost\" should be \"boost_factor\"\n - \"mult\" should be \"multiply\"\n\nAlso, store combine function names in ImmutableMap instead of iterating\nover all possible names each time.\n\ncloses #3872 for master" } ], "files": [ { "diff": "@@ -448,7 +448,7 @@ and the <<query-dsl-custom-filters-score-query>>\n \"custom_filters_score\": {\n \"filters\": [\n {\n- \"boost\": \"3\",\n+ \"boost_factor\": \"3\",\n \"filter\": {...}\n },\n {\n@@ -472,7 +472,7 @@ becomes:\n \"function_score\": {\n \"functions\": [\n {\n- \"boost\": \"3\",\n+ \"boost_factor\": \"3\",\n \"filter\": {...}\n },\n {", "filename": "docs/reference/query-dsl/queries/function-score-query.asciidoc", "status": "modified" }, { "diff": "@@ -31,7 +31,7 @@ public float combine(double queryBoost, double queryScore, double funcScore, dou\n \n @Override\n public String getName() {\n- return \"mult\";\n+ return \"multiply\";\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java", "status": "modified" }, { "diff": "@@ -19,6 +19,8 @@\n \n package org.elasticsearch.index.query.functionscore;\n \n+import com.google.common.collect.ImmutableMap;\n+import com.google.common.collect.ImmutableMap.Builder;\n import org.apache.lucene.search.Filter;\n import org.apache.lucene.search.Query;\n import org.elasticsearch.common.Strings;\n@@ -54,6 +56,17 @@ public FunctionScoreQueryParser(ScoreFunctionParserMapper funtionParserMapper) {\n public String[] names() {\n return new String[] { NAME, Strings.toCamelCase(NAME) };\n }\n+ \n+ private static final ImmutableMap<String, CombineFunction> combineFunctionsMap;\n+\n+ static {\n+ CombineFunction[] values = CombineFunction.values();\n+ Builder<String, CombineFunction> combineFunctionMapBuilder = ImmutableMap.<String, CombineFunction>builder();\n+ for (CombineFunction combineFunction : values) {\n+ combineFunctionMapBuilder.put(combineFunction.getName(), combineFunction);\n+ }\n+ combineFunctionsMap = combineFunctionMapBuilder.build();\n+ }\n \n @Override\n public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {\n@@ -179,11 +192,10 @@ private FiltersFunctionScoreQuery.ScoreMode parseScoreMode(QueryParseContext par\n \n private CombineFunction parseBoostMode(QueryParseContext parseContext, XContentParser parser) throws IOException {\n String boostMode = parser.text();\n- for (CombineFunction cf : CombineFunction.values()) {\n- if (cf.getName().equals(boostMode)) {\n- return cf;\n- }\n+ CombineFunction cf = combineFunctionsMap.get(boostMode);\n+ if (cf == null) {\n+ throw new QueryParsingException(parseContext.index(), NAME + \" illegal boost_mode [\" + boostMode + \"]\");\n }\n- throw new QueryParsingException(parseContext.index(), NAME + \" illegal boost_mode [\" + boostMode + \"]\");\n+ return cf;\n }\n }\n\\ No newline at end of file", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java", "status": "modified" } ] }
{ "body": "First of all, thanks for building such an amazing service. I am loving my experience with ElasticSearch so far.\n\nWhat I've run into is that trying to use the BooleanQueryBuilder in conjunction with the GeoShapeQueryBuilder is resulting in the following exception.\n\n```\norg.elasticsearch.search.builder.SearchSourceBuilderException: Failed to build search source\n at org.elasticsearch.search.builder.SearchSourceBuilder.buildAsBytes(SearchSourceBuilder.java:579)\n at org.elasticsearch.action.search.SearchRequest.source(SearchRequest.java:258)\n at org.elasticsearch.action.search.SearchRequestBuilder.doExecute(SearchRequestBuilder.java:839)\n at org.elasticsearch.action.ActionRequestBuilder.execute(ActionRequestBuilder.java:85)\n at org.elasticsearch.action.ActionRequestBuilder.execute(ActionRequestBuilder.java:59)\n at com.tomtom.lbs.vectordb.data.services.ElasticSearchService.search(ElasticSearchService.java:119)\n ... 38 more\nCaused by: org.elasticsearch.common.jackson.core.JsonGenerationException: Current context not an ARRAY but OBJECT\n at org.elasticsearch.common.jackson.core.base.GeneratorBase._reportError(GeneratorBase.java:444)\n at org.elasticsearch.common.jackson.dataformat.smile.SmileGenerator.writeEndArray(SmileGenerator.java:553)\n at org.elasticsearch.common.xcontent.json.JsonXContentGenerator.writeEndArray(JsonXContentGenerator.java:59)\n at org.elasticsearch.common.xcontent.XContentBuilder.endArray(XContentBuilder.java:227)\n at org.elasticsearch.index.query.BoolQueryBuilder.doXArrayContent(BoolQueryBuilder.java:182)\n at org.elasticsearch.index.query.BoolQueryBuilder.doXContent(BoolQueryBuilder.java:149)\n at org.elasticsearch.index.query.BaseQueryBuilder.toXContent(BaseQueryBuilder.java:65)\n at org.elasticsearch.search.builder.SearchSourceBuilder.toXContent(SearchSourceBuilder.java:601)\n at org.elasticsearch.search.builder.SearchSourceBuilder.buildAsBytes(SearchSourceBuilder.java:576)\n ... 43 more\n```\n\nHere is my code to build and execute the query:\n\n``` java\nGeoShapeQueryBuilder geoQuery = QueryBuilders.geoShapeQuery(\"searchGeometry\", new RectangleImpl(y1, y2, x1, x2, SpatialContext.GEO));\nBoolQueryBuilder query = QueryBuilders.boolQuery();\n\nif (featureIds.length > 0)\n query.must(QueryBuilders.termQuery(\"featureType\", featureIds[0]));\n\nquery.must(geoQuery);\n\nSearchRequestBuilder request = esClient.prepareSearch(\"index_name\")\n .setSearchType(SearchType.QUERY_THEN_FETCH)\n .setQuery(query)\n .setFrom(0)\n .setSize(maxResults)\n .setExplain(false);\n\nSearchResponse response = request.execute().actionGet();\n```\n\nAm I doing something wrong? I have a feeling I am but it looks right to me.\n\nThis isn't a big deal for me, because I can just build a JSON query manually and that works perfectly fine. However it'd be nice to be able to use the QueryBuilders as make the code much easier to read.\n\nThanks!\n", "comments": [ { "body": "Hey,\n\nI can reproduce this and will take a further look at it... the combination of geoquery and term query leads to that (you get sort of a different exception when you switch term and geoquery in the must query).\n\nThanks for notifying!\n", "created_at": "2013-10-11T08:41:56Z" }, { "body": "Thanks!\n", "created_at": "2013-10-11T14:03:03Z" } ], "number": 3878, "title": "Search using BooleanQueryBuilder and GeoShapeQueryBuilder results in \"Current context not an ARRAY but OBJECT\"" }
{ "body": "A missing endObject() resulted in serialization errors.\n\nCloses #3878\n", "number": 3887, "review_comments": [], "title": "Fix toXContent of GeoShapeQueryBuilder" }
{ "commits": [ { "message": "Fix toXContent of GeoShapeQueryBuilder\n\nA missing endObject() resulted in serialization errors.\n\nCloses #3878" } ], "files": [ { "diff": "@@ -160,6 +160,8 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep\n }\n \n builder.endObject();\n+\n+ builder.endObject();\n }\n \n }", "filename": "src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java", "status": "modified" }, { "diff": "@@ -0,0 +1,35 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.index.query;\n+\n+import org.elasticsearch.common.geo.builders.EnvelopeBuilder;\n+import org.elasticsearch.common.geo.builders.ShapeBuilder;\n+import org.elasticsearch.common.xcontent.json.JsonXContent;\n+import org.junit.Test;\n+\n+public class GeoShapeQueryBuilderTests {\n+\n+ @Test // see #3878\n+ public void testThatXContentSerializationInsideOfArrayWorks() throws Exception {\n+ EnvelopeBuilder envelopeBuilder = ShapeBuilder.newEnvelope().topLeft(0, 0).bottomRight(10, 10);\n+ GeoShapeQueryBuilder geoQuery = QueryBuilders.geoShapeQuery(\"searchGeometry\", envelopeBuilder);\n+ JsonXContent.contentBuilder().startArray().value(geoQuery).endArray();\n+ }\n+}", "filename": "src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java", "status": "added" } ] }
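With the missing `endObject()` in place, the reporter's bool + geo_shape combination serializes cleanly. Below is a minimal sketch of the same query built with `ShapeBuilder` (as in the new test) instead of Spatial4j's `RectangleImpl`; the class name is hypothetical, the field names are the reporter's, and the envelope coordinates are the placeholder values from the test.

``` java
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class BoolGeoShapeExample {
    static BoolQueryBuilder build(int featureId) {
        BoolQueryBuilder query = QueryBuilders.boolQuery();
        query.must(QueryBuilders.termQuery("featureType", featureId));
        // replace the envelope corners with the real bounding box
        query.must(QueryBuilders.geoShapeQuery("searchGeometry",
                ShapeBuilder.newEnvelope().topLeft(0, 0).bottomRight(10, 10)));
        return query;
    }
}
```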
{ "body": "This can currently happen if an index is repeatedly created by an indexing operation and deleted quickly afterwards. \n", "comments": [], "number": 3783, "title": "Use the new index UUID to ensure mapping update events from older indices are not applied to new indices with the same name" }
{ "body": "This can go wrong if indices with the same name are repeatedly created and deleted.\n\nAlso - some minor cleanup in ShardStateAction, where shard started events could be added twice to the to-be-applied list and the second instance would be ignored.\n\nCloses #3783\n", "number": 3784, "review_comments": [ { "body": "Why? We don't typically have a method name starting with upper case...\n", "created_at": "2013-09-26T16:01:49Z" }, { "body": "UUID (like GUID) is something I'm used to write upper cased, but apparently I'm the exception - will change. \n", "created_at": "2013-09-26T17:31:21Z" } ], "title": "Add indexUUID to mapping-updated and mapping-refresh events and make sure they are applied to an index with same UUID." }
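The gist of the change is that mapping-updated and mapping-refresh events now carry the index UUID, and the master only applies them when that UUID still matches the index in the current cluster state (with `_na_` from pre-0.90.6 nodes always matching for backward compatibility). The check sketched below is illustrative only, not the code added in this PR; the class name is hypothetical.

``` java
import org.elasticsearch.cluster.metadata.IndexMetaData;

public class IndexUuidGuardSketch {
    // Returns true if a mapping event tagged with 'requestIndexUUID' may be applied
    // to the index currently described by 'current' in the cluster state.
    static boolean shouldApply(IndexMetaData current, String requestIndexUUID) {
        if (IndexMetaData.INDEX_UUID_NA_VALUE.equals(requestIndexUUID)) {
            return true; // default value sent by older nodes, always accepted
        }
        return requestIndexUUID.equals(current.uuid()); // drop events from a deleted index with the same name
    }
}
```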
{ "commits": [ { "message": "Add indexUUID to mapping-updated and mapping-refresh events and make sure they are applied to an index with same UUID.\n\nThis can go wrong if indices with the same name are repeatably created and deleted.\n\nAlso - some minor clean up in ShardStateAction where shard started events could be added twice to the to-be-applied list where the second instance will be ignored.\n\nCloses #3783" }, { "message": "UUID() -> uuid()" }, { "message": "introduced a default indexUUID `_na_` value for backward compatibility (indexUUID is not nullable anymore)" }, { "message": "SimpleIdCacheTests.StubIndexService#indexUUID can't return null anymore" }, { "message": "added non null assertions to isSameUUID" } ], "files": [ { "diff": "@@ -40,6 +40,7 @@\n import org.elasticsearch.cluster.action.shard.ShardStateAction;\n import org.elasticsearch.cluster.block.ClusterBlockException;\n import org.elasticsearch.cluster.block.ClusterBlockLevel;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MappingMetaData;\n import org.elasticsearch.cluster.routing.ShardIterator;\n import org.elasticsearch.common.bytes.BytesReference;\n@@ -62,7 +63,6 @@\n import org.elasticsearch.transport.TransportRequestOptions;\n import org.elasticsearch.transport.TransportService;\n \n-import java.io.IOException;\n import java.util.Map;\n import java.util.Set;\n \n@@ -569,19 +569,18 @@ private void updateMappingOnMaster(final String index, final String type) {\n }\n documentMapper.refreshSource();\n \n- mappingUpdatedAction.execute(new MappingUpdatedAction.MappingUpdatedRequest(index, type, documentMapper.mappingSource()), new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {\n+ IndexMetaData metaData = clusterService.state().metaData().index(index);\n+\n+ final MappingUpdatedAction.MappingUpdatedRequest request = new MappingUpdatedAction.MappingUpdatedRequest(index, metaData.uuid(), type, documentMapper.mappingSource());\n+ mappingUpdatedAction.execute(request, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {\n @Override\n public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {\n // all is well\n }\n \n @Override\n public void onFailure(Throwable e) {\n- try {\n- logger.warn(\"failed to update master on updated mapping for index [{}], type [{}] and source [{}]\", e, index, type, documentMapper.mappingSource().string());\n- } catch (IOException e1) {\n- // ignore\n- }\n+ logger.warn(\"failed to update master on updated mapping for {}\", e, request);\n }\n });\n } catch (Throwable e) {", "filename": "src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java", "status": "modified" }, { "diff": "@@ -33,6 +33,7 @@\n import org.elasticsearch.cluster.action.shard.ShardStateAction;\n import org.elasticsearch.cluster.block.ClusterBlockException;\n import org.elasticsearch.cluster.block.ClusterBlockLevel;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MappingMetaData;\n import org.elasticsearch.cluster.metadata.MetaData;\n import org.elasticsearch.cluster.routing.ShardIterator;\n@@ -48,7 +49,6 @@\n import org.elasticsearch.threadpool.ThreadPool;\n import org.elasticsearch.transport.TransportService;\n \n-import java.io.IOException;\n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.TimeUnit;\n \n@@ -184,7 +184,8 @@ protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(C\n final 
IndexRequest request = shardRequest.request;\n \n // validate, if routing is required, that we got routing\n- MappingMetaData mappingMd = clusterState.metaData().index(request.index()).mappingOrDefault(request.type());\n+ IndexMetaData indexMetaData = clusterState.metaData().index(request.index());\n+ MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());\n if (mappingMd != null && mappingMd.routing().required()) {\n if (request.routing() == null) {\n throw new RoutingMissingException(request.index(), request.type(), request.id());\n@@ -203,7 +204,7 @@ protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(C\n .versionType(request.versionType())\n .origin(Engine.Operation.Origin.PRIMARY);\n if (index.parsedDoc().mappingsModified()) {\n- updateMappingOnMaster(request);\n+ updateMappingOnMaster(request, indexMetaData);\n }\n indexShard.index(index);\n version = index.version();\n@@ -215,7 +216,7 @@ protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(C\n .versionType(request.versionType())\n .origin(Engine.Operation.Origin.PRIMARY);\n if (create.parsedDoc().mappingsModified()) {\n- updateMappingOnMaster(request);\n+ updateMappingOnMaster(request, indexMetaData);\n }\n indexShard.create(create);\n version = create.version();\n@@ -263,17 +264,19 @@ protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {\n }\n }\n \n- private void updateMappingOnMaster(final IndexRequest request) {\n+ private void updateMappingOnMaster(final IndexRequest request, IndexMetaData indexMetaData) {\n final CountDownLatch latch = new CountDownLatch(1);\n try {\n- MapperService mapperService = indicesService.indexServiceSafe(request.index()).mapperService();\n+ final MapperService mapperService = indicesService.indexServiceSafe(request.index()).mapperService();\n final DocumentMapper documentMapper = mapperService.documentMapper(request.type());\n if (documentMapper == null) { // should not happen\n return;\n }\n documentMapper.refreshSource();\n- logger.trace(\"Sending mapping updated to master: index [{}] type [{}]\", request.index(), request.type());\n- mappingUpdatedAction.execute(new MappingUpdatedAction.MappingUpdatedRequest(request.index(), request.type(), documentMapper.mappingSource()), new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {\n+ final MappingUpdatedAction.MappingUpdatedRequest mappingRequest =\n+ new MappingUpdatedAction.MappingUpdatedRequest(request.index(), indexMetaData.uuid(), request.type(), documentMapper.mappingSource());\n+ logger.trace(\"Sending mapping updated to master: {}\", mappingRequest);\n+ mappingUpdatedAction.execute(mappingRequest, new ActionListener<MappingUpdatedAction.MappingUpdatedResponse>() {\n @Override\n public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) {\n // all is well\n@@ -283,11 +286,7 @@ public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdate\n @Override\n public void onFailure(Throwable e) {\n latch.countDown();\n- try {\n- logger.warn(\"Failed to update master on updated mapping for index [\" + request.index() + \"], type [\" + request.type() + \"] and source [\" + documentMapper.mappingSource().string() + \"]\", e);\n- } catch (IOException e1) {\n- // ignore\n- }\n+ logger.warn(\"Failed to update master on updated mapping for {}\", e, mappingRequest);\n }\n });\n } catch (Exception e) {", "filename": "src/main/java/org/elasticsearch/action/index/TransportIndexAction.java", "status": 
"modified" }, { "diff": "@@ -20,13 +20,15 @@\n package org.elasticsearch.cluster.action.index;\n \n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.Version;\n import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.ActionRequestValidationException;\n import org.elasticsearch.action.ActionResponse;\n import org.elasticsearch.action.support.master.MasterNodeOperationRequest;\n import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction;\n import org.elasticsearch.cluster.ClusterService;\n import org.elasticsearch.cluster.ClusterState;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaDataMappingService;\n import org.elasticsearch.common.compress.CompressedString;\n import org.elasticsearch.common.inject.Inject;\n@@ -76,15 +78,15 @@ protected MappingUpdatedResponse newResponse() {\n \n @Override\n protected void masterOperation(final MappingUpdatedRequest request, final ClusterState state, final ActionListener<MappingUpdatedResponse> listener) throws ElasticSearchException {\n- metaDataMappingService.updateMapping(request.index(), request.type(), request.mappingSource(), new MetaDataMappingService.Listener() {\n+ metaDataMappingService.updateMapping(request.index(), request.indexUUID(), request.type(), request.mappingSource(), new MetaDataMappingService.Listener() {\n @Override\n public void onResponse(MetaDataMappingService.Response response) {\n listener.onResponse(new MappingUpdatedResponse());\n }\n \n @Override\n public void onFailure(Throwable t) {\n- logger.warn(\"failed to dynamically update the mapping in cluster_state from shard\", t);\n+ logger.warn(\"[{}] update-mapping [{}] failed to dynamically update the mapping in cluster_state from shard\", t, request.index(), request.type());\n listener.onFailure(t);\n }\n });\n@@ -105,16 +107,16 @@ public void writeTo(StreamOutput out) throws IOException {\n public static class MappingUpdatedRequest extends MasterNodeOperationRequest<MappingUpdatedRequest> {\n \n private String index;\n-\n+ private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;\n private String type;\n-\n private CompressedString mappingSource;\n \n MappingUpdatedRequest() {\n }\n \n- public MappingUpdatedRequest(String index, String type, CompressedString mappingSource) {\n+ public MappingUpdatedRequest(String index, String indexUUID, String type, CompressedString mappingSource) {\n this.index = index;\n+ this.indexUUID = indexUUID;\n this.type = type;\n this.mappingSource = mappingSource;\n }\n@@ -123,6 +125,10 @@ public String index() {\n return index;\n }\n \n+ public String indexUUID() {\n+ return indexUUID;\n+ }\n+\n public String type() {\n return type;\n }\n@@ -142,6 +148,9 @@ public void readFrom(StreamInput in) throws IOException {\n index = in.readString();\n type = in.readString();\n mappingSource = CompressedString.readCompressedString(in);\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ indexUUID = in.readString();\n+ }\n }\n \n @Override\n@@ -150,6 +159,14 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeString(index);\n out.writeString(type);\n mappingSource.writeTo(out);\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeString(indexUUID);\n+ }\n+ }\n+\n+ @Override\n+ public String toString() {\n+ return \"index [\" + index + \"], indexUUID [\" + indexUUID + \"], type [\" + type + \"] and source [\" + mappingSource + \"]\";\n }\n }\n }\n\\ No newline at end of file", 
"filename": "src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java", "status": "modified" }, { "diff": "@@ -20,7 +20,9 @@\n package org.elasticsearch.cluster.action.index;\n \n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.Version;\n import org.elasticsearch.cluster.ClusterState;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.cluster.metadata.MetaDataMappingService;\n import org.elasticsearch.cluster.node.DiscoveryNodes;\n import org.elasticsearch.common.component.AbstractComponent;\n@@ -60,7 +62,7 @@ public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefres\n }\n \n private void innerMappingRefresh(NodeMappingRefreshRequest request) {\n- metaDataMappingService.refreshMapping(request.index(), request.types());\n+ metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types());\n }\n \n private class NodeMappingRefreshTransportHandler extends BaseTransportRequestHandler<NodeMappingRefreshRequest> {\n@@ -87,14 +89,16 @@ public String executor() {\n public static class NodeMappingRefreshRequest extends TransportRequest {\n \n private String index;\n+ private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;\n private String[] types;\n private String nodeId;\n \n NodeMappingRefreshRequest() {\n }\n \n- public NodeMappingRefreshRequest(String index, String[] types, String nodeId) {\n+ public NodeMappingRefreshRequest(String index, String indexUUID, String[] types, String nodeId) {\n this.index = index;\n+ this.indexUUID = indexUUID;\n this.types = types;\n this.nodeId = nodeId;\n }\n@@ -103,6 +107,11 @@ public String index() {\n return index;\n }\n \n+ public String indexUUID() {\n+ return indexUUID;\n+ }\n+\n+\n public String[] types() {\n return types;\n }\n@@ -117,6 +126,9 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeString(index);\n out.writeStringArray(types);\n out.writeString(nodeId);\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeString(indexUUID);\n+ }\n }\n \n @Override\n@@ -125,6 +137,9 @@ public void readFrom(StreamInput in) throws IOException {\n index = in.readString();\n types = in.readStringArray();\n nodeId = in.readString();\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ indexUUID = in.readString();\n+ }\n }\n }\n }", "filename": "src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java", "status": "modified" }, { "diff": "@@ -77,7 +77,7 @@ public ShardStateAction(Settings settings, ClusterService clusterService, Transp\n transportService.registerHandler(ShardFailedTransportHandler.ACTION, new ShardFailedTransportHandler());\n }\n \n- public void shardFailed(final ShardRouting shardRouting, String indexUUID, final String reason) throws ElasticSearchException {\n+ public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) throws ElasticSearchException {\n ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason);\n logger.warn(\"{} sending failed shard for {}\", shardRouting.shardId(), shardRoutingEntry);\n DiscoveryNodes nodes = clusterService.state().nodes();\n@@ -215,21 +215,25 @@ public ClusterState execute(ClusterState currentState) {\n // with the shard still initializing, and it will try and start it again (until the verification comes)\n \n IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardRouting.id());\n+\n+ boolean applyShardEvent = 
true;\n+\n for (ShardRouting entry : indexShardRoutingTable) {\n if (shardRouting.currentNodeId().equals(entry.currentNodeId())) {\n // we found the same shard that exists on the same node id\n- if (entry.initializing()) {\n- // shard not started, add it to the shards to be processed.\n- shardRoutingToBeApplied.add(shardRouting);\n- logger.debug(\"{} will apply shard started {}\", shardRouting.shardId(), shardRoutingEntry);\n- } else {\n+ if (!entry.initializing()) {\n+ // shard is in initialized state, skipping event (probable already started)\n logger.debug(\"{} ignoring shard started event for {}, current state: {}\", shardRouting.shardId(), shardRoutingEntry, entry.state());\n+ applyShardEvent = false;\n }\n- } else {\n- shardRoutingToBeApplied.add(shardRouting);\n- logger.debug(\"{} will apply shard started {}\", shardRouting.shardId(), shardRoutingEntry);\n }\n }\n+\n+ if (applyShardEvent) {\n+ shardRoutingToBeApplied.add(shardRouting);\n+ logger.debug(\"{} will apply shard started {}\", shardRouting.shardId(), shardRoutingEntry);\n+ }\n+\n } catch (Throwable t) {\n logger.error(\"{} unexpected failure while processing shard started [{}]\", t, shardRouting.shardId(), shardRouting);\n }\n@@ -299,7 +303,7 @@ static class ShardRoutingEntry extends TransportRequest {\n \n private ShardRouting shardRouting;\n \n- private String indexUUID;\n+ private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;\n \n private String reason;\n \n@@ -318,7 +322,7 @@ public void readFrom(StreamInput in) throws IOException {\n shardRouting = readShardRoutingEntry(in);\n reason = in.readString();\n if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n- indexUUID = in.readOptionalString();\n+ indexUUID = in.readString();\n }\n }\n \n@@ -328,18 +332,13 @@ public void writeTo(StreamOutput out) throws IOException {\n shardRouting.writeTo(out);\n out.writeString(reason);\n if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n- out.writeOptionalString(indexUUID);\n+ out.writeString(indexUUID);\n }\n }\n \n @Override\n public String toString() {\n- StringBuilder sb = new StringBuilder(shardRouting.toString());\n- if (indexUUID != null) {\n- sb.append(\", indexUUID [\").append(indexUUID).append(\"]\");\n- }\n- sb.append(\", reason [\").append(reason).append(\"]\");\n- return sb.toString();\n+ return \"\" + shardRouting + \", indexUUID [\" + indexUUID + \"], reason [\" + reason + \"]\";\n }\n }\n }", "filename": "src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java", "status": "modified" }, { "diff": "@@ -157,6 +157,7 @@ public static State fromString(String state) {\n public static final String SETTING_BLOCKS_METADATA = \"index.blocks.metadata\";\n public static final String SETTING_VERSION_CREATED = \"index.version.created\";\n public static final String SETTING_UUID = \"index.uuid\";\n+ public static final String INDEX_UUID_NA_VALUE = \"_na_\";\n \n private final String index;\n private final long version;\n@@ -214,25 +215,30 @@ public String index() {\n return index;\n }\n \n+ public String getIndex() {\n+ return index();\n+ }\n+\n+ public String uuid() {\n+ return settings.get(SETTING_UUID, INDEX_UUID_NA_VALUE);\n+ }\n+\n public String getUUID() {\n- return settings.get(SETTING_UUID);\n+ return uuid();\n }\n \n /**\n- * Test whether the current index UUID is the same as the given one. Incoming nulls always return true.\n+ * Test whether the current index UUID is the same as the given one. 
Returns true if either are _na_\n */\n- public boolean isSameUUID(@Nullable String otherUUID) {\n- if (otherUUID == null || getUUID() == null) {\n+ public boolean isSameUUID(String otherUUID) {\n+ assert otherUUID != null;\n+ assert uuid() != null;\n+ if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(uuid())) {\n return true;\n }\n return otherUUID.equals(getUUID());\n }\n \n-\n- public String getIndex() {\n- return index();\n- }\n-\n public long version() {\n return this.version;\n }", "filename": "src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java", "status": "modified" }, { "diff": "@@ -69,7 +69,7 @@ public class MetaDataMappingService extends AbstractComponent {\n \n private final NodeMappingCreatedAction mappingCreatedAction;\n \n- private final BlockingQueue<Object> refreshOrUpdateQueue = ConcurrentCollections.newBlockingQueue();\n+ private final BlockingQueue<MappingTask> refreshOrUpdateQueue = ConcurrentCollections.newBlockingQueue();\n \n @Inject\n public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeMappingCreatedAction mappingCreatedAction) {\n@@ -79,24 +79,32 @@ public MetaDataMappingService(Settings settings, ClusterService clusterService,\n this.mappingCreatedAction = mappingCreatedAction;\n }\n \n- static class RefreshTask {\n+ static class MappingTask {\n final String index;\n- final String[] types;\n+ final String indexUUID;\n \n- RefreshTask(String index, String[] types) {\n+ MappingTask(String index, final String indexUUID) {\n this.index = index;\n+ this.indexUUID = indexUUID;\n+ }\n+ }\n+\n+ static class RefreshTask extends MappingTask {\n+ final String[] types;\n+\n+ RefreshTask(String index, final String indexUUID, String[] types) {\n+ super(index, indexUUID);\n this.types = types;\n }\n }\n \n- static class UpdateTask {\n- final String index;\n+ static class UpdateTask extends MappingTask {\n final String type;\n final CompressedString mappingSource;\n final Listener listener;\n \n- UpdateTask(String index, String type, CompressedString mappingSource, Listener listener) {\n- this.index = index;\n+ UpdateTask(String index, String indexUUID, String type, CompressedString mappingSource, Listener listener) {\n+ super(index, indexUUID);\n this.type = type;\n this.mappingSource = mappingSource;\n this.listener = listener;\n@@ -109,7 +117,7 @@ static class UpdateTask {\n * and generate a single cluster change event out of all of those.\n */\n ClusterState executeRefreshOrUpdate(final ClusterState currentState) throws Exception {\n- List<Object> allTasks = new ArrayList<Object>();\n+ List<MappingTask> allTasks = new ArrayList<MappingTask>();\n refreshOrUpdateQueue.drainTo(allTasks);\n \n if (allTasks.isEmpty()) {\n@@ -118,43 +126,45 @@ ClusterState executeRefreshOrUpdate(final ClusterState currentState) throws Exce\n \n // break down to tasks per index, so we can optimize the on demand index service creation\n // to only happen for the duration of a single index processing of its respective events\n- Map<String, List<Object>> tasksPerIndex = Maps.newHashMap();\n- for (Object task : allTasks) {\n- String index = null;\n- if (task instanceof UpdateTask) {\n- index = ((UpdateTask) task).index;\n- } else if (task instanceof RefreshTask) {\n- index = ((RefreshTask) task).index;\n- } else {\n- logger.warn(\"illegal state, got wrong mapping task type [{}]\", task);\n+ Map<String, List<MappingTask>> tasksPerIndex = Maps.newHashMap();\n+ for (MappingTask task : allTasks) {\n+ if 
(task.index == null) {\n+ logger.debug(\"ignoring a mapping task of type [{}] with a null index.\", task);\n }\n- if (index != null) {\n- List<Object> indexTasks = tasksPerIndex.get(index);\n- if (indexTasks == null) {\n- indexTasks = new ArrayList<Object>();\n- tasksPerIndex.put(index, indexTasks);\n- }\n- indexTasks.add(task);\n+ List<MappingTask> indexTasks = tasksPerIndex.get(task.index);\n+ if (indexTasks == null) {\n+ indexTasks = new ArrayList<MappingTask>();\n+ tasksPerIndex.put(task.index, indexTasks);\n }\n+ indexTasks.add(task);\n+\n }\n \n boolean dirty = false;\n MetaData.Builder mdBuilder = newMetaDataBuilder().metaData(currentState.metaData());\n- for (Map.Entry<String, List<Object>> entry : tasksPerIndex.entrySet()) {\n+ for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) {\n String index = entry.getKey();\n- List<Object> tasks = entry.getValue();\n+ List<MappingTask> tasks = entry.getValue();\n boolean removeIndex = false;\n // keep track of what we already refreshed, no need to refresh it again...\n Set<String> processedRefreshes = Sets.newHashSet();\n try {\n- for (Object task : tasks) {\n+ for (MappingTask task : tasks) {\n+ final IndexMetaData indexMetaData = mdBuilder.get(index);\n+ if (indexMetaData == null) {\n+ // index got deleted on us, ignore...\n+ logger.debug(\"[{}] ignoring task [{}] - index meta data doesn't exist\", index, task);\n+ continue;\n+ }\n+\n+ if (!indexMetaData.isSameUUID(task.indexUUID)) {\n+ // index got deleted on us, ignore...\n+ logger.debug(\"[{}] ignoring task [{}] - index meta data doesn't match task uuid\", index, task);\n+ continue;\n+ }\n+\n if (task instanceof RefreshTask) {\n RefreshTask refreshTask = (RefreshTask) task;\n- final IndexMetaData indexMetaData = mdBuilder.get(index);\n- if (indexMetaData == null) {\n- // index got delete on us, ignore...\n- continue;\n- }\n IndexService indexService = indicesService.indexService(index);\n if (indexService == null) {\n // we need to create the index here, and add the current mapping to it, so we can merge\n@@ -195,13 +205,8 @@ ClusterState executeRefreshOrUpdate(final ClusterState currentState) throws Exce\n String type = updateTask.type;\n CompressedString mappingSource = updateTask.mappingSource;\n \n- // first, check if it really needs to be updated\n- final IndexMetaData indexMetaData = mdBuilder.get(index);\n- if (indexMetaData == null) {\n- // index got delete on us, ignore...\n- continue;\n- }\n if (indexMetaData.mappings().containsKey(type) && indexMetaData.mapping(type).source().equals(mappingSource)) {\n+ logger.debug(\"[{}] update_mapping [{}] ignoring mapping update task as it's source is equal to ours\", index, updateTask.type);\n continue;\n }\n \n@@ -221,16 +226,13 @@ ClusterState executeRefreshOrUpdate(final ClusterState currentState) throws Exce\n \n // if we end up with the same mapping as the original once, ignore\n if (indexMetaData.mappings().containsKey(type) && indexMetaData.mapping(type).source().equals(updatedMapper.mappingSource())) {\n+ logger.debug(\"[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have\", index, updateTask.type);\n continue;\n }\n \n // build the updated mapping source\n if (logger.isDebugEnabled()) {\n- try {\n- logger.debug(\"[{}] update_mapping [{}] (dynamic) with source [{}]\", index, type, updatedMapper.mappingSource().string());\n- } catch (Exception e) {\n- // ignore\n- }\n+ logger.debug(\"[{}] update_mapping [{}] (dynamic) with source [{}]\", index, type, 
updatedMapper.mappingSource());\n } else if (logger.isInfoEnabled()) {\n logger.info(\"[{}] update_mapping [{}] (dynamic)\", index, type);\n }\n@@ -262,8 +264,8 @@ ClusterState executeRefreshOrUpdate(final ClusterState currentState) throws Exce\n /**\n * Refreshes mappings if they are not the same between original and parsed version\n */\n- public void refreshMapping(final String index, final String... types) {\n- refreshOrUpdateQueue.add(new RefreshTask(index, types));\n+ public void refreshMapping(final String index, final String indexUUID, final String... types) {\n+ refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));\n clusterService.submitStateUpdateTask(\"refresh-mapping [\" + index + \"][\" + Arrays.toString(types) + \"]\", Priority.HIGH, new ClusterStateUpdateTask() {\n @Override\n public void onFailure(String source, Throwable t) {\n@@ -277,8 +279,8 @@ public ClusterState execute(ClusterState currentState) throws Exception {\n });\n }\n \n- public void updateMapping(final String index, final String type, final CompressedString mappingSource, final Listener listener) {\n- refreshOrUpdateQueue.add(new UpdateTask(index, type, mappingSource, listener));\n+ public void updateMapping(final String index, final String indexUUID, final String type, final CompressedString mappingSource, final Listener listener) {\n+ refreshOrUpdateQueue.add(new UpdateTask(index, indexUUID, type, mappingSource, listener));\n clusterService.submitStateUpdateTask(\"update-mapping [\" + index + \"][\" + type + \"]\", Priority.HIGH, new ClusterStateUpdateTask() {\n @Override\n public void onFailure(String source, Throwable t) {", "filename": "src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java", "status": "modified" }, { "diff": "@@ -295,7 +295,7 @@ public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException\n \n @Override\n public String indexUUID() {\n- return indexSettings.get(IndexMetaData.SETTING_UUID);\n+ return indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/index/service/InternalIndexService.java", "status": "modified" }, { "diff": "@@ -388,7 +388,8 @@ private void applyMappings(ClusterChangedEvent event) {\n }\n if (typesToRefresh != null) {\n nodeMappingRefreshAction.nodeMappingRefresh(event.state(),\n- new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId()));\n+ new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.uuid(),\n+ typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId()));\n }\n // go over and remove mappings\n for (DocumentMapper documentMapper : mapperService) {\n@@ -820,9 +821,7 @@ private class FailedEngineHandler implements Engine.FailedEngineListener {\n public void onFailedEngine(final ShardId shardId, final Throwable failure) {\n ShardRouting shardRouting = null;\n final IndexService indexService = indicesService.indexService(shardId.index().name());\n- String indexUUID = null;\n if (indexService != null) {\n- indexUUID = indexService.indexUUID();\n IndexShard indexShard = indexService.shard(shardId.id());\n if (indexShard != null) {\n shardRouting = indexShard.routingEntry();\n@@ -833,7 +832,7 @@ public void onFailedEngine(final ShardId shardId, final Throwable failure) {\n return;\n }\n final ShardRouting fShardRouting = shardRouting;\n- final String 
finalIndexUUID = indexUUID;\n+ final String indexUUID = indexService.indexUUID(); // we know indexService is not null here.\n threadPool.generic().execute(new Runnable() {\n @Override\n public void run() {\n@@ -849,7 +848,7 @@ public void run() {\n }\n try {\n failedShards.put(fShardRouting.shardId(), new FailedShard(fShardRouting.version()));\n- shardStateAction.shardFailed(fShardRouting, finalIndexUUID, \"engine failure, message [\" + detailedMessage(failure) + \"]\");\n+ shardStateAction.shardFailed(fShardRouting, indexUUID, \"engine failure, message [\" + detailedMessage(failure) + \"]\");\n } catch (Throwable e1) {\n logger.warn(\"[{}][{}] failed to mark shard as failed after a failed engine\", e1, indexService.index().name(), shardId.id());\n }", "filename": "src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java", "status": "modified" }, { "diff": "@@ -27,6 +27,7 @@\n import org.apache.lucene.index.*;\n import org.apache.lucene.store.RAMDirectory;\n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.cluster.metadata.IndexMetaData;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.bytes.HashedBytesArray;\n import org.elasticsearch.common.collect.Tuple;\n@@ -390,7 +391,7 @@ public Injector shardInjectorSafe(int shardId) throws IndexShardMissingException\n \n @Override\n public String indexUUID() {\n- return null;\n+ return IndexMetaData.INDEX_UUID_NA_VALUE;\n }\n \n @Override", "filename": "src/test/java/org/elasticsearch/index/cache/id/SimpleIdCacheTests.java", "status": "modified" } ] }
{ "body": "```\ncurl -XPUT 'http://localhost:9200/test?pretty=1' -d '\n{\n \"mappings\" : {\n \"test\" : {\n \"properties\" : {\n \"name\" : {\n \"type\" : \"completion\"\n }\n }\n }\n }\n}\n'\ncurl -XGET 'http://localhost:9200/_search?pretty=1' -d '\n{\n \"sort\" : \"name\"\n}\n'\n\n SearchPhaseExecutionException[Failed to execute phase [query], all shards failed; \n shardFailures {[PDCUCebERaCutbxDWeAORg][test][4]:\n SearchParseException[[test][4]:\n from[-1],size[-1]:\n Parse Failure [Failed to parse source [{\"sort\":\"name\"}]]]; nested:\n NullPointerException; }{[PDCUCebERaCutbxDWeAORg][test][3]:\n SearchParseException[[test][3]:\n from[-1],size[-1]:\n Parse Failure [Failed to parse source [{\"sort\":\"name\"}]]]; nested:\n NullPointerException; }{[PDCUCebERaCutbxDWeAORg][test][2]:\n SearchParseException[[test][2]:\n from[-1],size[-1]:\n Parse Failure [Failed to parse source [{\"sort\":\"name\"}]]]; nested:\n NullPointerException; }{[PDCUCebERaCutbxDWeAORg][test][0]:\n SearchParseException[[test][0]:\n from[-1],size[-1]:\n Parse Failure [Failed to parse source [{\"sort\":\"name\"}]]]; nested:\n NullPointerException; }{[PDCUCebERaCutbxDWeAORg][test][1]:\n SearchParseException[[test][1]:\n from[-1],size[-1]:\n Parse Failure [Failed to parse source [{\"sort\":\"name\"}]]]; nested:\n NullPointerException; }]\n```\n", "comments": [], "number": 3747, "title": "NPE when sorting on a completion field" }
{ "body": "Closes #3747\n", "number": 3751, "review_comments": [], "title": "Returning useful error message when sorting on a completion field" }
{ "commits": [ { "message": "Returning useful exception when sorting on a completion field\n\nCloses #3747" } ], "files": [ { "diff": "@@ -202,4 +202,6 @@ public Term createIndexNameTerm(BytesRef value) {\n PostingsFormatProvider postingsFormatProvider();\n \n boolean isNumeric();\n+\n+ boolean isSortable();\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/FieldMapper.java", "status": "modified" }, { "diff": "@@ -711,4 +711,8 @@ public boolean isNumeric() {\n return false;\n }\n \n+ @Override\n+ public boolean isSortable() {\n+ return true;\n+ }\n }", "filename": "src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java", "status": "modified" }, { "diff": "@@ -357,6 +357,10 @@ protected String contentType() {\n return CONTENT_TYPE;\n }\n \n+ @Override\n+ public boolean isSortable() {\n+ return false;\n+ }\n \n @Override\n public FieldType defaultFieldType() {", "filename": "src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java", "status": "modified" }, { "diff": "@@ -31,6 +31,7 @@\n import org.elasticsearch.index.fielddata.fieldcomparator.SortMode;\n import org.elasticsearch.index.mapper.FieldMapper;\n import org.elasticsearch.index.mapper.ObjectMappers;\n+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;\n import org.elasticsearch.index.mapper.core.NumberFieldMapper;\n import org.elasticsearch.index.mapper.object.ObjectMapper;\n import org.elasticsearch.index.query.ParsedFilter;\n@@ -198,6 +199,10 @@ private void addSortField(SearchContext context, List<SortField> sortFields, Str\n throw new SearchParseException(context, \"No mapping found for [\" + fieldName + \"] in order to sort on\");\n }\n \n+ if (!fieldMapper.isSortable()) {\n+ throw new SearchParseException(context, \"Sorting not supported for field[\" + fieldName + \"]\");\n+ }\n+\n // Enable when we also know how to detect fields that do tokenize, but only emit one token\n /*if (fieldMapper instanceof StringFieldMapper) {\n StringFieldMapper stringFieldMapper = (StringFieldMapper) fieldMapper;", "filename": "src/main/java/org/elasticsearch/search/sort/SortParseElement.java", "status": "modified" }, { "diff": "@@ -27,13 +27,15 @@\n import org.elasticsearch.action.admin.indices.segments.ShardSegments;\n import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;\n import org.elasticsearch.action.index.IndexRequestBuilder;\n+import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.suggest.SuggestResponse;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.index.mapper.MapperException;\n import org.elasticsearch.index.mapper.MapperParsingException;\n import org.elasticsearch.index.mapper.core.CompletionFieldMapper;\n+import org.elasticsearch.search.sort.FieldSortBuilder;\n import org.elasticsearch.search.suggest.completion.CompletionStats;\n import org.elasticsearch.search.suggest.completion.CompletionSuggestion;\n import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;\n@@ -556,6 +558,27 @@ public void testThatStatsAreWorking() throws Exception {\n assertThat(regexSizeInBytes, is(totalSizeInBytes));\n }\n \n+ @Test\n+ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exception {\n+ createIndexAndMapping();\n+\n+ client().prepareIndex(INDEX, TYPE, \"1\").setSource(jsonBuilder()\n+ 
.startObject().startObject(FIELD)\n+ .startArray(\"input\").value(\"Nirvana\").endArray()\n+ .endObject().endObject()\n+ ).get();\n+\n+ refresh();\n+ try {\n+ client().prepareSearch(INDEX).setTypes(TYPE).addSort(new FieldSortBuilder(FIELD)).execute().actionGet();\n+ fail(\"Expected an exception due to trying to sort on completion field, but did not happen\");\n+ } catch (SearchPhaseExecutionException e) {\n+ assertThat(e.status().getStatus(), is(400));\n+ assertThat(e.getMessage(), containsString(\"Sorting not supported for field[\" + FIELD + \"]\"));\n+ }\n+ }\n+\n+\n public void assertSuggestions(String suggestion, String... suggestions) {\n String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10);\n SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(", "filename": "src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java", "status": "modified" } ] }
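As a hedged illustration of the behaviour this record describes (not part of the recorded PR #3751): re-running the sort request from issue #3747 against a node that includes this change should produce the explicit error added in `SortParseElement` rather than a `NullPointerException`. The host, index name (`test`) and field name (`name`) are carried over from the issue's reproduction and are only illustrative.

```
# Illustrative re-run of the reproduction from #3747 after this change.
curl -XGET 'http://localhost:9200/test/_search?pretty=1' -d '
{
    "sort" : "name"
}
'
# Expected: the request now fails with HTTP 400 and a SearchParseException whose
# message contains "Sorting not supported for field[name]" instead of an NPE.
```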
{ "body": "The indices stats API supports `fields`/`completion_fields`/`fielddata_fields`:\n\n```\nGET /_stats/completion?fields=name\n....\n \"completion\": {\n \"size\": \"20b\",\n \"size_in_bytes\": 20,\n \"fields\": {\n \"name\": {\n \"size\": \"20b\",\n \"size_in_bytes\": 20\n }\n }\n }\n```\n\nBut the nodes/indices stats only supports `fields` for fielddata, not for completion stats:\n\n```\nGET /_nodes/stats/indices/completion?fields=name\n....\n \"completion\": {\n \"size\": \"20b\",\n \"size_in_bytes\": 20\n }\n```\n", "comments": [], "number": 3746, "title": "Missing completion fields in nodes stats" }
{ "body": "Closes #3746\n\nAlso adds support for `completion_fields` and `fielddata_fields` parameters to be compatible with `RestIndicesStatsAction`\n", "number": 3749, "review_comments": [], "title": "Add completion field support in Rest nodes stats" }
{ "commits": [ { "message": "Add completion field support in Rest nodes stats\n\nCloses #3746" } ], "files": [ { "diff": "@@ -60,8 +60,8 @@ public RestNodesStatsAction(Settings settings, Client client, RestController con\n controller.registerHandler(RestRequest.Method.GET, \"/_nodes/{nodeId}/stats/indices/\" + flag.getRestName(), indicesHandler);\n controller.registerHandler(RestRequest.Method.GET, \"/_nodes/indices/\" + flag.getRestName() + \"/stats\", indicesHandler);\n controller.registerHandler(RestRequest.Method.GET, \"/_nodes/{nodeId}/indices/\" + flag.getRestName() + \"/stats\", indicesHandler);\n- if (flag == Flag.FieldData) {\n- // add field specific endpoint\n+ if (flag == Flag.FieldData || flag == Flag.Completion) {\n+ // add field specific endpoints\n controller.registerHandler(RestRequest.Method.GET, \"/_nodes/stats/indices/\" + flag.getRestName() + \"/{fields}\", indicesHandler);\n controller.registerHandler(RestRequest.Method.GET, \"/_nodes/{nodeId}/stats/indices/\" + flag.getRestName() + \"/{fields}\", indicesHandler);\n controller.registerHandler(RestRequest.Method.GET, \"/_nodes/indices/\" + flag.getRestName() + \"/{fields}/stats\", indicesHandler);\n@@ -183,8 +183,10 @@ class RestIndicesHandler implements RestHandler {\n public void handleRequest(final RestRequest request, final RestChannel channel) {\n NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(Strings.splitStringByCommaToArray(request.param(\"nodeId\")));\n CommonStatsFlags flags = this.flags;\n- if (flags.isSet(Flag.FieldData) && request.hasParam(\"fields\")) {\n- flags = flags.clone().fieldDataFields(request.paramAsStringArray(\"fields\", null));\n+ if (flags.isSet(Flag.FieldData) && (request.hasParam(\"fields\") || request.hasParam(\"fielddata_fields\"))) {\n+ flags = flags.clone().fieldDataFields(request.paramAsStringArray(\"fielddata_fields\", request.paramAsStringArray(\"fields\", null)));\n+ } else if (flags.isSet(Flag.Completion) && (request.hasParam(\"fields\") || request.hasParam(\"completion_fields\"))) {\n+ flags = flags.clone().completionDataFields(request.paramAsStringArray(\"completion_fields\", request.paramAsStringArray(\"fields\", null)));\n }\n nodesStatsRequest.clear().indices(flags);\n executeNodeStats(request, channel, nodesStatsRequest);", "filename": "src/main/java/org/elasticsearch/rest/action/admin/cluster/node/stats/RestNodesStatsAction.java", "status": "modified" } ] }
{ "body": "Same problem as #3625 and #3626, but with the validate query api. \nWhen using the validate query API with date range queries that contain `now` (e.g. `[* TO now-1d]`), the parsed query contains a negative number that leads to a valid query anyway, but the explain shows that something went wrong while parsing the date range. The following curl reproduction shows the different result obtained using the search API and the validate query API.\n\n```\ncurl -XPUT localhost:9200/index1/type1/1 -d '{\n \"date\": \"2013-09-03T15:07:47.000Z\"\n}\n'\n\ncurl -XPOST localhost:9200/index1/_refresh\n\n#one hit gets returned (id 1) using search api\ncurl -XGET localhost:9200/index1/_search -d '\n{ \n \"query\" : {\n \"query_string\": {\n \"query\": \"date:[* TO now-1d]\"\n } \n }\n}\n'\n#validate query api returns a weird query with a negative time from epoch (`date:[* TO -86400000]`\")\ncurl -XGET localhost:9200/index1/type1/_validate/query?explain -d '\n{ \n \"query_string\": {\n \"query\": \"date:[* TO now-1d]\"\n }\n}\n'\n```\n", "comments": [], "number": 3629, "title": "Validate query api parses wrong date range query when using \"now\"" }
{ "body": "Set nowInMillis to search context created by the count api, validate query api and explain api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ShardCountRequest, ShardValidateRequest and ExplainRequest in a backwards compatible manner\n\nFixes #3625, #3626 & #3629 \n", "number": 3686, "review_comments": [], "title": "Set nowInMillis to search context created by the count, validate query and explain api" }
{ "commits": [ { "message": "Set nowInMillis to search context created by the count api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ShardCountRequest in a backwards compatible manner\n\nFixes #3625" }, { "message": "Set nowInMillis to search context created by the validate query api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ShardValidateQueryRequest in a backwards compatible manner\n\nFixes #3629" }, { "message": "Set nowInMillis to search context created by the explain api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ExplainRequest in a backwards compatible manner\n\nFixes #3626" } ], "files": [ { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.action.admin.indices.validate.query;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n@@ -36,6 +37,7 @@ class ShardValidateQueryRequest extends BroadcastShardOperationRequest {\n private BytesReference querySource;\n private String[] types = Strings.EMPTY_ARRAY;\n private boolean explain;\n+ private long nowInMillis;\n \n @Nullable\n private String[] filteringAliases;\n@@ -50,6 +52,7 @@ public ShardValidateQueryRequest(String index, int shardId, @Nullable String[] f\n this.types = request.types();\n this.explain = request.explain();\n this.filteringAliases = filteringAliases;\n+ this.nowInMillis = request.nowInMillis;\n }\n \n public BytesReference querySource() {\n@@ -68,6 +71,10 @@ public String[] filteringAliases() {\n return filteringAliases;\n }\n \n+ public long nowInMillis() {\n+ return this.nowInMillis;\n+ }\n+\n @Override\n public void readFrom(StreamInput in) throws IOException {\n super.readFrom(in);\n@@ -89,6 +96,12 @@ public void readFrom(StreamInput in) throws IOException {\n }\n \n explain = in.readBoolean();\n+\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ nowInMillis = in.readVLong();\n+ } else {\n+ nowInMillis = System.currentTimeMillis();\n+ }\n }\n \n @Override\n@@ -110,5 +123,9 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n out.writeBoolean(explain);\n+\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeVLong(nowInMillis);\n+ }\n }\n }", "filename": "src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n \n import jsr166y.ThreadLocalRandom;\n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.DefaultShardOperationFailedException;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;\n@@ -73,6 +74,12 @@ public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, Cl\n this.cacheRecycler = cacheRecycler;\n }\n \n+ @Override\n+ protected void doExecute(ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {\n+ request.nowInMillis = System.currentTimeMillis();\n+ super.doExecute(request, listener);\n+ }\n+\n @Override\n protected String executor() {\n return ThreadPool.Names.SEARCH;\n@@ -171,7 +178,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re\n valid = true;\n } else {\n SearchContext.setCurrent(new DefaultSearchContext(0,\n- new 
ShardSearchRequest().types(request.types()),\n+ new ShardSearchRequest().types(request.types()).nowInMillis(request.nowInMillis()),\n null, indexShard.acquireSearcher(), indexService, indexShard,\n scriptService, cacheRecycler));\n try {", "filename": "src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java", "status": "modified" }, { "diff": "@@ -56,6 +56,8 @@ public class ValidateQueryRequest extends BroadcastOperationRequest<ValidateQuer\n \n private String[] types = Strings.EMPTY_ARRAY;\n \n+ long nowInMillis;\n+\n ValidateQueryRequest() {\n }\n \n@@ -128,7 +130,6 @@ public ValidateQueryRequest query(XContentBuilder builder) {\n @Required\n public ValidateQueryRequest query(String querySource) {\n this.querySource = new BytesArray(querySource);\n- ;\n this.querySourceUnsafe = false;\n return this;\n }", "filename": "src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java", "status": "modified" }, { "diff": "@@ -70,6 +70,8 @@ public class CountRequest extends BroadcastOperationRequest<CountRequest> {\n \n private String[] types = Strings.EMPTY_ARRAY;\n \n+ long nowInMillis;\n+\n CountRequest() {\n }\n ", "filename": "src/main/java/org/elasticsearch/action/count/CountRequest.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.action.count;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n@@ -39,6 +40,8 @@ class ShardCountRequest extends BroadcastShardOperationRequest {\n \n private String[] types = Strings.EMPTY_ARRAY;\n \n+ private long nowInMillis;\n+\n @Nullable\n private String[] filteringAliases;\n \n@@ -52,6 +55,7 @@ public ShardCountRequest(String index, int shardId, @Nullable String[] filtering\n this.querySource = request.querySource();\n this.types = request.types();\n this.filteringAliases = filteringAliases;\n+ this.nowInMillis = request.nowInMillis;\n }\n \n public float minScore() {\n@@ -70,6 +74,10 @@ public String[] filteringAliases() {\n return filteringAliases;\n }\n \n+ public long nowInMillis() {\n+ return this.nowInMillis;\n+ }\n+\n @Override\n public void readFrom(StreamInput in) throws IOException {\n super.readFrom(in);\n@@ -91,6 +99,11 @@ public void readFrom(StreamInput in) throws IOException {\n filteringAliases[i] = in.readString();\n }\n }\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ nowInMillis = in.readVLong();\n+ } else {\n+ nowInMillis = System.currentTimeMillis();\n+ }\n }\n \n @Override\n@@ -112,5 +125,8 @@ public void writeTo(StreamOutput out) throws IOException {\n } else {\n out.writeVInt(0);\n }\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeVLong(nowInMillis);\n+ }\n }\n }", "filename": "src/main/java/org/elasticsearch/action/count/ShardCountRequest.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.action.count;\n \n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.DefaultShardOperationFailedException;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;\n@@ -75,6 +76,12 @@ public TransportCountAction(Settings settings, ThreadPool threadPool, ClusterSer\n this.cacheRecycler = cacheRecycler;\n }\n \n+ 
@Override\n+ protected void doExecute(CountRequest request, ActionListener<CountResponse> listener) {\n+ request.nowInMillis = System.currentTimeMillis();\n+ super.doExecute(request, listener);\n+ }\n+\n @Override\n protected String executor() {\n return ThreadPool.Names.SEARCH;\n@@ -153,7 +160,9 @@ protected ShardCountResponse shardOperation(ShardCountRequest request) throws El\n \n SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());\n SearchContext context = new DefaultSearchContext(0,\n- new ShardSearchRequest().types(request.types()).filteringAliases(request.filteringAliases()),\n+ new ShardSearchRequest().types(request.types())\n+ .filteringAliases(request.filteringAliases())\n+ .nowInMillis(request.nowInMillis()),\n shardTarget, indexShard.acquireSearcher(), indexService, indexShard,\n scriptService, cacheRecycler);\n SearchContext.setCurrent(context);", "filename": "src/main/java/org/elasticsearch/action/count/TransportCountAction.java", "status": "modified" }, { "diff": "@@ -19,11 +19,11 @@\n \n package org.elasticsearch.action.explain;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.ActionRequestValidationException;\n import org.elasticsearch.action.ValidateActions;\n import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;\n import org.elasticsearch.client.Requests;\n-import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.io.stream.StreamInput;\n@@ -51,6 +51,8 @@ public class ExplainRequest extends SingleShardOperationRequest<ExplainRequest>\n \n private String[] filteringAlias = Strings.EMPTY_ARRAY;\n \n+ long nowInMillis;\n+\n ExplainRequest() {\n }\n \n@@ -196,6 +198,12 @@ public void readFrom(StreamInput in) throws IOException {\n }\n \n fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);\n+\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ nowInMillis = in.readVLong();\n+ } else {\n+ nowInMillis = System.currentTimeMillis();\n+ }\n }\n \n @Override\n@@ -215,5 +223,9 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);\n+\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeVLong(nowInMillis);\n+ }\n }\n }", "filename": "src/main/java/org/elasticsearch/action/explain/ExplainRequest.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.apache.lucene.index.Term;\n import org.apache.lucene.search.Explanation;\n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;\n import org.elasticsearch.cache.recycler.CacheRecycler;\n import org.elasticsearch.cluster.ClusterService;\n@@ -75,6 +76,12 @@ public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterS\n this.cacheRecycler = cacheRecycler;\n }\n \n+ @Override\n+ protected void doExecute(ExplainRequest request, ActionListener<ExplainResponse> listener) {\n+ request.nowInMillis = System.currentTimeMillis();\n+ super.doExecute(request, listener);\n+ }\n+\n protected String transportAction() {\n return ExplainAction.NAME;\n }\n@@ -102,7 +109,8 @@ protected ExplainResponse shardOperation(ExplainRequest request, int shardId) th\n SearchContext context = new DefaultSearchContext(\n 0,\n new 
ShardSearchRequest().types(new String[]{request.type()})\n- .filteringAliases(request.filteringAlias()),\n+ .filteringAliases(request.filteringAlias())\n+ .nowInMillis(request.nowInMillis),\n null, result.searcher(), indexService, indexShard,\n scriptService, cacheRecycler\n );", "filename": "src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java", "status": "modified" }, { "diff": "@@ -19,7 +19,7 @@\n \n package org.elasticsearch.count.query;\n \n-import org.apache.lucene.util.LuceneTestCase;\n+import org.elasticsearch.AbstractSharedClusterTest;\n import org.elasticsearch.ElasticSearchException;\n import org.elasticsearch.action.count.CountResponse;\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n@@ -29,7 +29,6 @@\n import org.elasticsearch.index.query.*;\n import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;\n import org.elasticsearch.index.query.MatchQueryBuilder.Type;\n-import org.elasticsearch.AbstractSharedClusterTest;\n import org.joda.time.DateTime;\n import org.joda.time.DateTimeZone;\n import org.joda.time.format.ISODateTimeFormat;\n@@ -41,6 +40,9 @@\n import static org.elasticsearch.index.query.FilterBuilders.*;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;\n+import static org.hamcrest.Matchers.allOf;\n+import static org.hamcrest.Matchers.containsString;\n+import static org.hamcrest.Matchers.equalTo;\n \n /**\n *\n@@ -197,7 +199,7 @@ public void testLowercaseExpandedTerms() {\n assertHitCount(countResponse, 0l);\n }\n \n- @Test @LuceneTestCase.AwaitsFix(bugUrl = \"https://github.com/elasticsearch/elasticsearch/issues/3625\")\n+ @Test\n public void testDateRangeInQueryString() {\n client().admin().indices().prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n \n@@ -206,20 +208,20 @@ public void testDateRangeInQueryString() {\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"past\", aMonthAgo, \"future\", aMonthFromNow).execute().actionGet();\n \n- client().admin().indices().prepareRefresh().execute().actionGet();\n+ refresh();\n \n CountResponse countResponse = client().prepareCount().setQuery(queryString(\"past:[now-2M/d TO now/d]\")).execute().actionGet();\n assertHitCount(countResponse, 1l);\n \n countResponse = client().prepareCount().setQuery(queryString(\"future:[now/d TO now+2M/d]\").lowercaseExpandedTerms(false)).execute().actionGet();\n assertHitCount(countResponse, 1l);\n \n- try {\n- client().prepareCount().setQuery(queryString(\"future:[now/D TO now+2M/d]\").lowercaseExpandedTerms(false)).execute().actionGet();\n- fail(\"D is an unsupported unit in date math\");\n- } catch (Exception e) {\n- // expected\n- }\n+ countResponse = client().prepareCount().setQuery(queryString(\"future:[now/D TO now+2M/d]\").lowercaseExpandedTerms(false)).execute().actionGet();\n+ //D is an unsupported unit in date math\n+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));\n+ assertThat(countResponse.getFailedShards(), equalTo(1));\n+ assertThat(countResponse.getShardFailures().length, equalTo(1));\n+ assertThat(countResponse.getShardFailures()[0].reason(), allOf(containsString(\"Failed to parse\"), containsString(\"unit [D] not supported for date math\")));\n }\n \n @Test", "filename": "src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java", "status": "modified" }, { "diff": "@@ -19,18 +19,21 @@\n \n package 
org.elasticsearch.explain;\n \n+import org.elasticsearch.AbstractSharedClusterTest;\n import org.elasticsearch.action.explain.ExplainResponse;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.index.query.FilterBuilders;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.indices.IndexMissingException;\n-import org.elasticsearch.AbstractSharedClusterTest;\n+import org.joda.time.DateTime;\n+import org.joda.time.DateTimeZone;\n+import org.joda.time.format.ISODateTimeFormat;\n import org.junit.Test;\n \n import java.util.Map;\n \n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n-import static org.hamcrest.MatcherAssert.assertThat;\n+import static org.elasticsearch.index.query.QueryBuilders.queryString;\n import static org.hamcrest.Matchers.equalTo;\n \n /**\n@@ -240,4 +243,19 @@ public void testExplainWithAlias() throws Exception {\n assertFalse(response.isMatch());\n }\n \n+ @Test\n+ public void explainDateRangeInQueryString() {\n+ client().admin().indices().prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1)).get();\n+\n+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));\n+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));\n+\n+ client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"past\", aMonthAgo, \"future\", aMonthFromNow).get();\n+\n+ refresh();\n+\n+ ExplainResponse explainResponse = client().prepareExplain(\"test\", \"type\", \"1\").setQuery(queryString(\"past:[now-2M/d TO now/d]\")).get();\n+ assertThat(explainResponse.isExists(), equalTo(true));\n+ assertThat(explainResponse.isMatch(), equalTo(true));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/explain/ExplainActionTests.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.validate;\n \n import com.google.common.base.Charsets;\n+import org.elasticsearch.AbstractSharedClusterTest;\n import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Priority;\n@@ -30,13 +31,16 @@\n import org.elasticsearch.index.query.FilterBuilders;\n import org.elasticsearch.index.query.QueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n-import org.elasticsearch.AbstractSharedClusterTest;\n import org.hamcrest.Matcher;\n+import org.joda.time.DateTime;\n+import org.joda.time.DateTimeZone;\n+import org.joda.time.format.ISODateTimeFormat;\n import org.junit.Test;\n \n import java.io.IOException;\n \n-import static org.hamcrest.MatcherAssert.assertThat;\n+import static org.elasticsearch.index.query.QueryBuilders.queryString;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n import static org.hamcrest.Matchers.*;\n \n /**\n@@ -228,6 +232,29 @@ public void explainValidateQueryTwoNodes() throws IOException {\n }\n }\n \n+ @Test //https://github.com/elasticsearch/elasticsearch/issues/3629\n+ public void explainDateRangeInQueryString() {\n+ client().admin().indices().prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1)).get();\n+\n+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));\n+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new 
DateTime(DateTimeZone.UTC).plusMonths(1));\n+\n+ client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"past\", aMonthAgo, \"future\", aMonthFromNow).get();\n+\n+ refresh();\n+\n+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery()\n+ .setQuery(queryString(\"past:[now-2M/d TO now/d]\")).setExplain(true).get();\n+\n+ assertNoFailures(response);\n+ assertThat(response.getQueryExplanation().size(), equalTo(1));\n+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());\n+ DateTime twoMonthsAgo = new DateTime(DateTimeZone.UTC).minusMonths(2).withTimeAtStartOfDay();\n+ DateTime now = new DateTime(DateTimeZone.UTC).plusDays(1).withTimeAtStartOfDay();\n+ assertThat(response.getQueryExplanation().get(0).getExplanation(),\n+ equalTo(\"past:[\" + twoMonthsAgo.getMillis() + \" TO \" + now.getMillis() + \"]\"));\n+ assertThat(response.isValid(), equalTo(true));\n+ }\n \n private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matcher) {\n ValidateQueryResponse response = client().admin().indices().prepareValidateQuery(\"test\")\n@@ -240,5 +267,4 @@ private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matche\n assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher);\n assertThat(response.isValid(), equalTo(true));\n }\n-\n }", "filename": "src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java", "status": "modified" } ] }
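As a hedged sketch of the observable effect of this change on the validate query API (not part of the recorded PR #3686): re-running the reproduction from issue #3629 should now yield a sensible explanation. Host, index and type names are taken from the issue and are only illustrative.

```
# Illustrative re-run of the reproduction from #3629 after this change.
curl -XGET localhost:9200/index1/type1/_validate/query?explain -d '
{
  "query_string": {
    "query": "date:[* TO now-1d]"
  }
}
'
# Expected: the explanation now shows a positive epoch-millis upper bound (now minus
# one day) instead of the negative value reported in the issue.
```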
{ "body": "Same problem as #3625, but with the explain api. \nWhen using the explain API with date range queries that contain `now` (e.g. `[* TO now-1d]`), the parsed query contains a negative number that leads to a no match being returned. The following curl reproduction shows the different result obtained using the search API and the explain API.\n\n```\ncurl -XPUT localhost:9200/index1/type1/1 -d '{\n \"date\": \"2013-09-03T15:07:47.000Z\"\n}\n'\n\ncurl -XPOST localhost:9200/index1/_refresh\n\n#one hit gets returned (id 1) using search api\ncurl -XGET localhost:9200/index1/_search -d '\n{ \n \"query\" : {\n \"query_string\": {\n \"query\": \"date:[* TO now-1d]\"\n } \n }\n}\n'\n#explain api says the document doesn't match\ncurl -XGET localhost:9200/index1/type1/1/_explain -d '\n{ \n \"query_string\": {\n \"query\": \"date:[* TO now-1d]\"\n }\n}\n'\n```\n", "comments": [], "number": 3626, "title": " Explain api parses wrong date range query when using \"now\"" }
{ "body": "Set nowInMillis to search context created by the count api, validate query api and explain api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ShardCountRequest, ShardValidateRequest and ExplainRequest in a backwards compatible manner\n\nFixes #3625, #3626 & #3629 \n", "number": 3686, "review_comments": [], "title": "Set nowInMillis to search context created by the count, validate query and explain api" }
{ "commits": [ { "message": "Set nowInMillis to search context created by the count api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ShardCountRequest in a backwards compatible manner\n\nFixes #3625" }, { "message": "Set nowInMillis to search context created by the validate query api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ShardValidateQueryRequest in a backwards compatible manner\n\nFixes #3629" }, { "message": "Set nowInMillis to search context created by the explain api so that \"NOW\" can be used within queries\n\nAdded nowInMillis to ExplainRequest in a backwards compatible manner\n\nFixes #3626" } ], "files": [ { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.action.admin.indices.validate.query;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n@@ -36,6 +37,7 @@ class ShardValidateQueryRequest extends BroadcastShardOperationRequest {\n private BytesReference querySource;\n private String[] types = Strings.EMPTY_ARRAY;\n private boolean explain;\n+ private long nowInMillis;\n \n @Nullable\n private String[] filteringAliases;\n@@ -50,6 +52,7 @@ public ShardValidateQueryRequest(String index, int shardId, @Nullable String[] f\n this.types = request.types();\n this.explain = request.explain();\n this.filteringAliases = filteringAliases;\n+ this.nowInMillis = request.nowInMillis;\n }\n \n public BytesReference querySource() {\n@@ -68,6 +71,10 @@ public String[] filteringAliases() {\n return filteringAliases;\n }\n \n+ public long nowInMillis() {\n+ return this.nowInMillis;\n+ }\n+\n @Override\n public void readFrom(StreamInput in) throws IOException {\n super.readFrom(in);\n@@ -89,6 +96,12 @@ public void readFrom(StreamInput in) throws IOException {\n }\n \n explain = in.readBoolean();\n+\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ nowInMillis = in.readVLong();\n+ } else {\n+ nowInMillis = System.currentTimeMillis();\n+ }\n }\n \n @Override\n@@ -110,5 +123,9 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n out.writeBoolean(explain);\n+\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeVLong(nowInMillis);\n+ }\n }\n }", "filename": "src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java", "status": "modified" }, { "diff": "@@ -21,6 +21,7 @@\n \n import jsr166y.ThreadLocalRandom;\n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.DefaultShardOperationFailedException;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;\n@@ -73,6 +74,12 @@ public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, Cl\n this.cacheRecycler = cacheRecycler;\n }\n \n+ @Override\n+ protected void doExecute(ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {\n+ request.nowInMillis = System.currentTimeMillis();\n+ super.doExecute(request, listener);\n+ }\n+\n @Override\n protected String executor() {\n return ThreadPool.Names.SEARCH;\n@@ -171,7 +178,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re\n valid = true;\n } else {\n SearchContext.setCurrent(new DefaultSearchContext(0,\n- new 
ShardSearchRequest().types(request.types()),\n+ new ShardSearchRequest().types(request.types()).nowInMillis(request.nowInMillis()),\n null, indexShard.acquireSearcher(), indexService, indexShard,\n scriptService, cacheRecycler));\n try {", "filename": "src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java", "status": "modified" }, { "diff": "@@ -56,6 +56,8 @@ public class ValidateQueryRequest extends BroadcastOperationRequest<ValidateQuer\n \n private String[] types = Strings.EMPTY_ARRAY;\n \n+ long nowInMillis;\n+\n ValidateQueryRequest() {\n }\n \n@@ -128,7 +130,6 @@ public ValidateQueryRequest query(XContentBuilder builder) {\n @Required\n public ValidateQueryRequest query(String querySource) {\n this.querySource = new BytesArray(querySource);\n- ;\n this.querySourceUnsafe = false;\n return this;\n }", "filename": "src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java", "status": "modified" }, { "diff": "@@ -70,6 +70,8 @@ public class CountRequest extends BroadcastOperationRequest<CountRequest> {\n \n private String[] types = Strings.EMPTY_ARRAY;\n \n+ long nowInMillis;\n+\n CountRequest() {\n }\n ", "filename": "src/main/java/org/elasticsearch/action/count/CountRequest.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.action.count;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest;\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n@@ -39,6 +40,8 @@ class ShardCountRequest extends BroadcastShardOperationRequest {\n \n private String[] types = Strings.EMPTY_ARRAY;\n \n+ private long nowInMillis;\n+\n @Nullable\n private String[] filteringAliases;\n \n@@ -52,6 +55,7 @@ public ShardCountRequest(String index, int shardId, @Nullable String[] filtering\n this.querySource = request.querySource();\n this.types = request.types();\n this.filteringAliases = filteringAliases;\n+ this.nowInMillis = request.nowInMillis;\n }\n \n public float minScore() {\n@@ -70,6 +74,10 @@ public String[] filteringAliases() {\n return filteringAliases;\n }\n \n+ public long nowInMillis() {\n+ return this.nowInMillis;\n+ }\n+\n @Override\n public void readFrom(StreamInput in) throws IOException {\n super.readFrom(in);\n@@ -91,6 +99,11 @@ public void readFrom(StreamInput in) throws IOException {\n filteringAliases[i] = in.readString();\n }\n }\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ nowInMillis = in.readVLong();\n+ } else {\n+ nowInMillis = System.currentTimeMillis();\n+ }\n }\n \n @Override\n@@ -112,5 +125,8 @@ public void writeTo(StreamOutput out) throws IOException {\n } else {\n out.writeVInt(0);\n }\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeVLong(nowInMillis);\n+ }\n }\n }", "filename": "src/main/java/org/elasticsearch/action/count/ShardCountRequest.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.action.count;\n \n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.ShardOperationFailedException;\n import org.elasticsearch.action.support.DefaultShardOperationFailedException;\n import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;\n@@ -75,6 +76,12 @@ public TransportCountAction(Settings settings, ThreadPool threadPool, ClusterSer\n this.cacheRecycler = cacheRecycler;\n }\n \n+ 
@Override\n+ protected void doExecute(CountRequest request, ActionListener<CountResponse> listener) {\n+ request.nowInMillis = System.currentTimeMillis();\n+ super.doExecute(request, listener);\n+ }\n+\n @Override\n protected String executor() {\n return ThreadPool.Names.SEARCH;\n@@ -153,7 +160,9 @@ protected ShardCountResponse shardOperation(ShardCountRequest request) throws El\n \n SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());\n SearchContext context = new DefaultSearchContext(0,\n- new ShardSearchRequest().types(request.types()).filteringAliases(request.filteringAliases()),\n+ new ShardSearchRequest().types(request.types())\n+ .filteringAliases(request.filteringAliases())\n+ .nowInMillis(request.nowInMillis()),\n shardTarget, indexShard.acquireSearcher(), indexService, indexShard,\n scriptService, cacheRecycler);\n SearchContext.setCurrent(context);", "filename": "src/main/java/org/elasticsearch/action/count/TransportCountAction.java", "status": "modified" }, { "diff": "@@ -19,11 +19,11 @@\n \n package org.elasticsearch.action.explain;\n \n+import org.elasticsearch.Version;\n import org.elasticsearch.action.ActionRequestValidationException;\n import org.elasticsearch.action.ValidateActions;\n import org.elasticsearch.action.support.single.shard.SingleShardOperationRequest;\n import org.elasticsearch.client.Requests;\n-import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.io.stream.StreamInput;\n@@ -51,6 +51,8 @@ public class ExplainRequest extends SingleShardOperationRequest<ExplainRequest>\n \n private String[] filteringAlias = Strings.EMPTY_ARRAY;\n \n+ long nowInMillis;\n+\n ExplainRequest() {\n }\n \n@@ -196,6 +198,12 @@ public void readFrom(StreamInput in) throws IOException {\n }\n \n fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);\n+\n+ if (in.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ nowInMillis = in.readVLong();\n+ } else {\n+ nowInMillis = System.currentTimeMillis();\n+ }\n }\n \n @Override\n@@ -215,5 +223,9 @@ public void writeTo(StreamOutput out) throws IOException {\n }\n \n FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);\n+\n+ if (out.getVersion().onOrAfter(Version.V_0_90_6)) {\n+ out.writeVLong(nowInMillis);\n+ }\n }\n }", "filename": "src/main/java/org/elasticsearch/action/explain/ExplainRequest.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.apache.lucene.index.Term;\n import org.apache.lucene.search.Explanation;\n import org.elasticsearch.ElasticSearchException;\n+import org.elasticsearch.action.ActionListener;\n import org.elasticsearch.action.support.single.shard.TransportShardSingleOperationAction;\n import org.elasticsearch.cache.recycler.CacheRecycler;\n import org.elasticsearch.cluster.ClusterService;\n@@ -75,6 +76,12 @@ public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterS\n this.cacheRecycler = cacheRecycler;\n }\n \n+ @Override\n+ protected void doExecute(ExplainRequest request, ActionListener<ExplainResponse> listener) {\n+ request.nowInMillis = System.currentTimeMillis();\n+ super.doExecute(request, listener);\n+ }\n+\n protected String transportAction() {\n return ExplainAction.NAME;\n }\n@@ -102,7 +109,8 @@ protected ExplainResponse shardOperation(ExplainRequest request, int shardId) th\n SearchContext context = new DefaultSearchContext(\n 0,\n new 
ShardSearchRequest().types(new String[]{request.type()})\n- .filteringAliases(request.filteringAlias()),\n+ .filteringAliases(request.filteringAlias())\n+ .nowInMillis(request.nowInMillis),\n null, result.searcher(), indexService, indexShard,\n scriptService, cacheRecycler\n );", "filename": "src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java", "status": "modified" }, { "diff": "@@ -19,7 +19,7 @@\n \n package org.elasticsearch.count.query;\n \n-import org.apache.lucene.util.LuceneTestCase;\n+import org.elasticsearch.AbstractSharedClusterTest;\n import org.elasticsearch.ElasticSearchException;\n import org.elasticsearch.action.count.CountResponse;\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n@@ -29,7 +29,6 @@\n import org.elasticsearch.index.query.*;\n import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;\n import org.elasticsearch.index.query.MatchQueryBuilder.Type;\n-import org.elasticsearch.AbstractSharedClusterTest;\n import org.joda.time.DateTime;\n import org.joda.time.DateTimeZone;\n import org.joda.time.format.ISODateTimeFormat;\n@@ -41,6 +40,9 @@\n import static org.elasticsearch.index.query.FilterBuilders.*;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;\n+import static org.hamcrest.Matchers.allOf;\n+import static org.hamcrest.Matchers.containsString;\n+import static org.hamcrest.Matchers.equalTo;\n \n /**\n *\n@@ -197,7 +199,7 @@ public void testLowercaseExpandedTerms() {\n assertHitCount(countResponse, 0l);\n }\n \n- @Test @LuceneTestCase.AwaitsFix(bugUrl = \"https://github.com/elasticsearch/elasticsearch/issues/3625\")\n+ @Test\n public void testDateRangeInQueryString() {\n client().admin().indices().prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n \n@@ -206,20 +208,20 @@ public void testDateRangeInQueryString() {\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"past\", aMonthAgo, \"future\", aMonthFromNow).execute().actionGet();\n \n- client().admin().indices().prepareRefresh().execute().actionGet();\n+ refresh();\n \n CountResponse countResponse = client().prepareCount().setQuery(queryString(\"past:[now-2M/d TO now/d]\")).execute().actionGet();\n assertHitCount(countResponse, 1l);\n \n countResponse = client().prepareCount().setQuery(queryString(\"future:[now/d TO now+2M/d]\").lowercaseExpandedTerms(false)).execute().actionGet();\n assertHitCount(countResponse, 1l);\n \n- try {\n- client().prepareCount().setQuery(queryString(\"future:[now/D TO now+2M/d]\").lowercaseExpandedTerms(false)).execute().actionGet();\n- fail(\"D is an unsupported unit in date math\");\n- } catch (Exception e) {\n- // expected\n- }\n+ countResponse = client().prepareCount().setQuery(queryString(\"future:[now/D TO now+2M/d]\").lowercaseExpandedTerms(false)).execute().actionGet();\n+ //D is an unsupported unit in date math\n+ assertThat(countResponse.getSuccessfulShards(), equalTo(0));\n+ assertThat(countResponse.getFailedShards(), equalTo(1));\n+ assertThat(countResponse.getShardFailures().length, equalTo(1));\n+ assertThat(countResponse.getShardFailures()[0].reason(), allOf(containsString(\"Failed to parse\"), containsString(\"unit [D] not supported for date math\")));\n }\n \n @Test", "filename": "src/test/java/org/elasticsearch/count/query/SimpleQueryTests.java", "status": "modified" }, { "diff": "@@ -19,18 +19,21 @@\n \n package 
org.elasticsearch.explain;\n \n+import org.elasticsearch.AbstractSharedClusterTest;\n import org.elasticsearch.action.explain.ExplainResponse;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.index.query.FilterBuilders;\n import org.elasticsearch.index.query.QueryBuilders;\n import org.elasticsearch.indices.IndexMissingException;\n-import org.elasticsearch.AbstractSharedClusterTest;\n+import org.joda.time.DateTime;\n+import org.joda.time.DateTimeZone;\n+import org.joda.time.format.ISODateTimeFormat;\n import org.junit.Test;\n \n import java.util.Map;\n \n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n-import static org.hamcrest.MatcherAssert.assertThat;\n+import static org.elasticsearch.index.query.QueryBuilders.queryString;\n import static org.hamcrest.Matchers.equalTo;\n \n /**\n@@ -240,4 +243,19 @@ public void testExplainWithAlias() throws Exception {\n assertFalse(response.isMatch());\n }\n \n+ @Test\n+ public void explainDateRangeInQueryString() {\n+ client().admin().indices().prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1)).get();\n+\n+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));\n+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));\n+\n+ client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"past\", aMonthAgo, \"future\", aMonthFromNow).get();\n+\n+ refresh();\n+\n+ ExplainResponse explainResponse = client().prepareExplain(\"test\", \"type\", \"1\").setQuery(queryString(\"past:[now-2M/d TO now/d]\")).get();\n+ assertThat(explainResponse.isExists(), equalTo(true));\n+ assertThat(explainResponse.isMatch(), equalTo(true));\n+ }\n }", "filename": "src/test/java/org/elasticsearch/explain/ExplainActionTests.java", "status": "modified" }, { "diff": "@@ -20,6 +20,7 @@\n package org.elasticsearch.validate;\n \n import com.google.common.base.Charsets;\n+import org.elasticsearch.AbstractSharedClusterTest;\n import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;\n import org.elasticsearch.client.Client;\n import org.elasticsearch.common.Priority;\n@@ -30,13 +31,16 @@\n import org.elasticsearch.index.query.FilterBuilders;\n import org.elasticsearch.index.query.QueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n-import org.elasticsearch.AbstractSharedClusterTest;\n import org.hamcrest.Matcher;\n+import org.joda.time.DateTime;\n+import org.joda.time.DateTimeZone;\n+import org.joda.time.format.ISODateTimeFormat;\n import org.junit.Test;\n \n import java.io.IOException;\n \n-import static org.hamcrest.MatcherAssert.assertThat;\n+import static org.elasticsearch.index.query.QueryBuilders.queryString;\n+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n import static org.hamcrest.Matchers.*;\n \n /**\n@@ -228,6 +232,29 @@ public void explainValidateQueryTwoNodes() throws IOException {\n }\n }\n \n+ @Test //https://github.com/elasticsearch/elasticsearch/issues/3629\n+ public void explainDateRangeInQueryString() {\n+ client().admin().indices().prepareCreate(\"test\").setSettings(ImmutableSettings.settingsBuilder().put(\"index.number_of_shards\", 1)).get();\n+\n+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));\n+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new 
DateTime(DateTimeZone.UTC).plusMonths(1));\n+\n+ client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"past\", aMonthAgo, \"future\", aMonthFromNow).get();\n+\n+ refresh();\n+\n+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery()\n+ .setQuery(queryString(\"past:[now-2M/d TO now/d]\")).setExplain(true).get();\n+\n+ assertNoFailures(response);\n+ assertThat(response.getQueryExplanation().size(), equalTo(1));\n+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());\n+ DateTime twoMonthsAgo = new DateTime(DateTimeZone.UTC).minusMonths(2).withTimeAtStartOfDay();\n+ DateTime now = new DateTime(DateTimeZone.UTC).plusDays(1).withTimeAtStartOfDay();\n+ assertThat(response.getQueryExplanation().get(0).getExplanation(),\n+ equalTo(\"past:[\" + twoMonthsAgo.getMillis() + \" TO \" + now.getMillis() + \"]\"));\n+ assertThat(response.isValid(), equalTo(true));\n+ }\n \n private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matcher) {\n ValidateQueryResponse response = client().admin().indices().prepareValidateQuery(\"test\")\n@@ -240,5 +267,4 @@ private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matche\n assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher);\n assertThat(response.isValid(), equalTo(true));\n }\n-\n }", "filename": "src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java", "status": "modified" } ] }
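The three shard-level requests above (ShardCountRequest, ShardValidateQueryRequest, ExplainRequest) all apply the same version-gated wire pattern for the new field. Below is a minimal sketch of that pattern, distilled from the diffs; the class name is illustrative only, and the remaining request state normally handled by super.readFrom/writeTo is omitted:

```java
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

// Illustrative class name; the real requests extend BroadcastShardOperationRequest
// (or SingleShardOperationRequest) and carry additional fields.
class NowAwareShardRequest {

    private long nowInMillis;

    public long nowInMillis() {
        return this.nowInMillis;
    }

    public void readFrom(StreamInput in) throws IOException {
        // Nodes older than 0.90.6 never send the timestamp, so fall back to the
        // local clock instead of mis-reading the rest of the stream.
        if (in.getVersion().onOrAfter(Version.V_0_90_6)) {
            nowInMillis = in.readVLong();
        } else {
            nowInMillis = System.currentTimeMillis();
        }
    }

    public void writeTo(StreamOutput out) throws IOException {
        // Only serialize the field to nodes that know how to read it back.
        if (out.getVersion().onOrAfter(Version.V_0_90_6)) {
            out.writeVLong(nowInMillis);
        }
    }
}
```

The coordinating side of the change is equally small: each transport action stamps `request.nowInMillis = System.currentTimeMillis()` in `doExecute` before fanning out, so every shard evaluates "NOW" against the same instant.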
{ "body": "## Expected Behavior\n\nNormally, if you try to index a document without an ID in the URI (e.g. a POST) but with an _id field in the document (and no explicit _id path mapping), it throws an error because the autogenerated ID does not match the provided _id field:\n\n``` bash\ncurl -XDELETE localhost:9200/testindex\ncurl -XPUT localhost:9200/testindex\ncurl -XPOST localhost:9200/testindex/testtype?pretty -d '{\"_id\":\"polyfractal\",\"key\":\"value\"}}}'\n```\n\n``` json\n{\n \"error\" : \"MapperParsingException[failed to parse [_id]]; nested: MapperParsingException[Provided id [O-kIgieVTRG9DpxHML7LkA] does not match the content one [polyfractal]]; \",\n \"status\" : 400\n}\n```\n## Broken Behavior\n\nHowever, if the _id field happens to be an object, Elasticsearch happily indexes the document:\n\n``` bash\ncurl -XDELETE localhost:9200/testindex\ncurl -XPUT localhost:9200/testindex\ncurl -XPOST \"localhost:9200/testindex/testtype\" -d '{\"key\":\"value\"}'\ncurl -XPOST \"localhost:9200/testindex/testtype\" -d '{\"_id\":{\"name\":\"polyfractal\"},\"key\":\"value\"}}}'\n```\n\n``` json\n{\"ok\":true,\"_index\":\"testindex\",\"_type\":\"testtype\",\"_id\":\"b2xEPk5tTfC-RLsCb1ZapA\",\"_version\":1}\n{\"ok\":true,\"_index\":\"testindex\",\"_type\":\"testtype\",\"_id\":\"BsTbRqaeTrKLIe0JoeHsWw\",\"_version\":1}\n```\n\nYou can GET it:\n\n``` bash\ncurl -XGET localhost:9200/testindex/testtype/BsTbRqaeTrKLIe0JoeHsWw?pretty\n```\n\n``` json\n{\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"BsTbRqaeTrKLIe0JoeHsWw\",\n \"_version\" : 1,\n \"exists\" : true, \"_source\" : {\"_id\":{\"name\":\"polyfractal\"},\"key\":\"value\"}}}\n}\n```\n\nIt shows up with a match_all query:\n\n``` bash\ncurl -XGET localhost:9200/testindex/testtype/_search?pretty -d '{\"query\":{\"match_all\":{}}}'\n```\n\n``` json\n{\n \"took\" : 1,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 2,\n \"max_score\" : 1.0,\n \"hits\" : [ {\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"BsTbRqaeTrKLIe0JoeHsWw\",\n \"_score\" : 1.0, \"_source\" : {\"_id\":{\"name\":\"polyfractal\"},\"key\":\"value\"}}}\n }, {\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"b2xEPk5tTfC-RLsCb1ZapA\",\n \"_score\" : 1.0, \"_source\" : {\"key\":\"value\"}\n } ]\n }\n}\n```\n\nBut doesn't show up when you search for exact values (or Match or any other search):\n\n``` bash\ncurl -XGET localhost:9200/testindex/testtype/_search?pretty -d '{\"query\":{\"term\":{\"key\":\"value\"}}}'\n```\n\n``` json\n{\n \"took\" : 1,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 0.30685282,\n \"hits\" : [ {\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"b2xEPk5tTfC-RLsCb1ZapA\",\n \"_score\" : 0.30685282, \"_source\" : {\"key\":\"value\"}\n } ]\n }\n}\n```\n\nIf you ask ES why it doesn't show up, it says there are no matching terms:\n\n``` bash\ncurl -XGET localhost:9200/testindex/testtype/BsTbRqaeTrKLIe0JoeHsWw/_explain?pretty -d '{\"query\":{\"term\":{\"key\":\"value\"}}}'\n```\n\n``` json\n{\n \"ok\" : true,\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"BsTbRqaeTrKLIe0JoeHsWw\",\n \"matched\" : false,\n \"explanation\" : {\n \"value\" : 0.0,\n \"description\" : \"no matching term\"\n }\n}\n```\n\nAnd finally, as a fun twist, you can set an explicit 
mapping to look inside the _id object. This works with regard to the ID (it extracts the appropriate ID), is GETable, match_all, etc. Search is still broken.\n\n``` bash\ncurl -XDELETE localhost:9200/testindex\ncurl -XPUT localhost:9200/testindex -d '{\n \"mappings\":{\n \"testtype\":{\n \"_id\" : {\n \"path\" : \"_id.name\"\n },\n \"properties\":{\n \"_id\":{\n \"type\":\"object\",\n \"properties\":{\n \"name\":{\n \"type\":\"string\"\n }\n }\n }\n }\n }\n }\n}'\n\ncurl -XPOST \"localhost:9200/testindex/testtype\" -d '{\"key\":\"value\"}'\ncurl -XPOST \"localhost:9200/testindex/testtype\" -d '{\"_id\":{\"name\":\"polyfractal\"},\"key\":\"value\"}}}'\ncurl -XGET localhost:9200/testindex/testtype/polyfractal?pretty\n```\n\n``` json\n{\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"polyfractal\",\n \"_version\" : 1,\n \"exists\" : true, \"_source\" : {\"_id\":{\"name\":\"polyfractal\"},\"key\":\"value\"}}}\n}\n```\n\n``` bash\ncurl -XGET localhost:9200/testindex/testtype/_search?pretty -d '{\"query\":{\"match_all\":{}}}'\n```\n\n``` json\n{\n \"took\" : 2,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 2,\n \"max_score\" : 1.0,\n \"hits\" : [ {\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"wsT9vaevTCW5EuKyr7nmUw\",\n \"_score\" : 1.0, \"_source\" : {\"key\":\"value\"}\n }, {\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"polyfractal\",\n \"_score\" : 1.0, \"_source\" : {\"_id\":{\"name\":\"polyfractal\"},\"key\":\"value\"}}}\n } ]\n }\n}\n```\n\n``` bash\ncurl -XGET localhost:9200/testindex/testtype/_search?pretty -d '{\"query\":{\"term\":{\"key\":\"value\"}}}'\n```\n\n``` json\n{\n \"took\" : 2,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 0.30685282,\n \"hits\" : [ {\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"wsT9vaevTCW5EuKyr7nmUw\",\n \"_score\" : 0.30685282, \"_source\" : {\"key\":\"value\"}\n } ]\n }\n}\n```\n## Reference\n\nThis was surfaced by [Scott on the mailing list](https://groups.google.com/d/msg/elasticsearch/0at1uZBvN3k/xIatIxwVziwJ).\n", "comments": [ { "body": "It's a little bit more fun than that, even: you actually get _partial_ indexing!\n\n```\ncurl -XDELETE localhost:9200/testindex\ncurl -XPUT localhost:9200/testindex\ncurl -XPOST localhost:9200/testindex/testtype -d '{\"leftkey\":\"value\",\"_id\":{\"name\":\"polyfractal\"},\"rightkey\":\"value\"}}}'\ncurl -XPOST localhost:9200/_flush\n```\n\nNow search on the field _before_ the _id:\n\n```\ncurl -XGET localhost:9200/testindex/testtype/_search?pretty -d '{\"query\":{\"term\":{\"leftkey\":\"value\"}}}'\n{\n \"took\" : 3,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" : 0\n },\n \"hits\" : {\n \"total\" : 1,\n \"max_score\" : 0.30685282,\n \"hits\" : [ {\n \"_index\" : \"testindex\",\n \"_type\" : \"testtype\",\n \"_id\" : \"PalIN5CpSPKkGbhs4qNqaw\",\n \"_score\" : 0.30685282, \"_source\" : {\"leftkey\":\"value\",\"_id\":{\"name\":\"polyfractal\"},\"rightkey\":\"value\"}}}\n } ]\n }\n}\n```\n\nThere you go.\nBut search on the field _after_ the _id:\n\n```\ncurl -XGET localhost:9200/testindex/testtype/_search?pretty -d '{\"query\":{\"term\":{\"rightkey\":\"value\"}}}'\n{\n \"took\" : 1,\n \"timed_out\" : false,\n \"_shards\" : {\n \"total\" : 5,\n \"successful\" : 5,\n \"failed\" 
: 0\n },\n \"hits\" : {\n \"total\" : 0,\n \"max_score\" : null,\n \"hits\" : [ ]\n }\n}\n```\n\nAnd you get nothing.\n", "created_at": "2015-02-12T23:41:53Z" }, { "body": "I am affected by this behavior too, monogo output the field like this \n\n```\n{ \"_id\":{\"$oid\":\"54d9e3bf30320c3335017e69\"}, \"@timestamp\":\"...\"}\n```\n\nactually I did not care about the \"_id\" field, but I care about the \"@timestamp\" field which is _silently_ not indexed. Here an example that shows the behavior:\nhttps://gist.github.com/andreaskern/01d1d292f7f146186ee5\n", "created_at": "2015-02-13T07:16:12Z" }, { "body": "In 2.0, the timestamp field would now be indexed correctly, as would `_id.$oid`. Wondering if we should allow users to index `_id` field inside the body at all? /cc @rjernst \n", "created_at": "2015-05-29T17:04:05Z" }, { "body": "The ability to specify _id within a document has already been removed for 2.0+ indexes. \n", "created_at": "2015-05-29T17:36:13Z" }, { "body": "@rjernst you removed the ability to specify the main doc _id in the body, but if the body contains an `_id` field then it creates a field called `_id` in the mapping, which can't be queried. \n\nWhat I'm asking is: should we just ignore the fact that this field is not accessible (as we do in master today) or should we actually throw an exception? I'm leaning towards ignoring, as users don't always have control over the docs they receive.\n", "created_at": "2015-05-29T18:47:59Z" }, { "body": "I would be in favor of throwing an exception. This would only be for 2.0+ indexes, and it is really just field name validation (disallowing fields colliding with meta fields). The mechanism would be the same, a user would not be able to explicitly add a field `_id` in the properties for a document type.\n", "created_at": "2015-05-31T11:42:49Z" }, { "body": "@rjernst it's a tricky one. eg mongo adds `{ \"_id\": { \"$oid\": \"....\" }}`, so actually the `_id.$oid` field IS queryable... should this still throw an exception?\n", "created_at": "2015-05-31T11:44:04Z" }, { "body": "IMO, yes.\n", "created_at": "2015-05-31T11:48:27Z" }, { "body": "With #8871, I don't think that would work, because _id is both a field mapper (the real meta field), and an object mapper.\n", "created_at": "2015-05-31T11:50:17Z" }, { "body": "@rjernst yep, makes sense\n", "created_at": "2015-05-31T12:06:17Z" }, { "body": "@rjernst this still works, even with #8871 merged in\n", "created_at": "2015-06-24T17:41:42Z" }, { "body": "Closed by #14003\n", "created_at": "2015-10-14T13:21:26Z" } ], "number": 3517, "title": "If _id field is an object, no error is thrown but doc is \"unsearchable\"" }
{ "body": "An exception is thrown if the provided id does not match the content id, but only if the content id is a string field. If the content id is a complex object, no exception is thrown and the document is indexed anyway, leading to problems with search later.\n\nThis fix adds an additional check for _id fields that are objects and throws an exception if one is encountered.\n\nFixes #3517\n", "number": 3586, "review_comments": [ { "body": "you could remove the null check by changing the second check to `Defaults.NAME.equals(parser.currentName())`\n", "created_at": "2014-05-28T06:37:59Z" }, { "body": "I think the first check should be:\n\n``` Java\nif (parser.currentName() != null && parser.currentName().equals(Defaults.NAME)) {\n if (parser.currentToken().isValue() == false) {\n throw new MapperParsingException(\"Expected a value as Content id but got \" + parser.currentToken());\n }\n/....\n}\n\n```\n\nthat way we also fail for arrays etc. \n", "created_at": "2014-07-17T13:48:59Z" } ], "title": "Throw exception when content _id is an object" }
{ "commits": [ { "message": "Throw exception when content _id is an object\n\n An exception is thrown if the provided id does not match the\n content id, but only if the content id is a string field. If\n the content id is a complex object, no exception is thrown but\n the document is indexed anyway, leading to problems with search\n later.\n\n This fix adds an additional check for _id fields that are objects\n and throws an exception if one is encountered\n\n Fixes #3517" } ], "files": [ { "diff": "@@ -307,6 +307,8 @@ protected Field parseCreateField(ParseContext context) throws IOException {\n return null;\n }\n return new Field(names.indexName(), context.id(), fieldType);\n+ } else if (parser.currentName() != null && parser.currentName().equals(Defaults.NAME) && parser.currentToken().equals(XContentParser.Token.START_OBJECT)) {\n+ throw new MapperParsingException(\"Content id cannot be an object.\");\n } else {\n // we are in the pre/post parse phase\n if (!fieldType.indexed() && !fieldType.stored()) {", "filename": "src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java", "status": "modified" }, { "diff": "@@ -19,6 +19,7 @@\n \n package org.elasticsearch.test.unit.index.mapper.id;\n \n+import org.elasticsearch.common.bytes.BytesReference;\n import org.elasticsearch.common.xcontent.ToXContent;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n@@ -114,4 +115,22 @@ public void testIdPath() throws Exception {\n \n assertThat(serialized_id_mapping, equalTo(expected_id_mapping));\n }\n+\n+ @Test\n+ public void testObjectId() throws Exception {\n+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(\"type\")\n+ .endObject().endObject().string();\n+ DocumentMapper docMapper = MapperTestUtils.newParser().parse(mapping);\n+\n+ BytesReference source = XContentFactory.jsonBuilder().startObject()\n+ .startObject(\"_id\").field(\"name\", \"test\").endObject()\n+ .endObject().bytes();\n+ try {\n+ ParsedDocument doc = docMapper.parse(\"type\", \"1\", source);\n+ assert false;\n+ } catch (MapperParsingException e) {\n+ assert true;\n+ }\n+\n+ }\n }", "filename": "src/test/java/org/elasticsearch/test/unit/index/mapper/id/IdMappingTests.java", "status": "modified" } ] }
{ "body": "When setting `queue_size` on the index/bulk thread pools, they can cause replica shard failures when a request ends up being rejected on the replica shard. We should not adhere to the `queue_size` limit when executing the operation on the replica (which is perfectly fine, since the primary shard will make sure to limit it).\n", "comments": [ { "body": "+1\n", "created_at": "2013-08-17T19:13:55Z" } ], "number": 3526, "title": "Setting index/bulk thread pools with queue_size can cause replica shard failures" }
{ "body": "Setting index/bulk thread pools with queue_size can cause replica shard failures\ncloses #3526\n", "number": 3566, "review_comments": [], "title": "Setting index/bulk thread pools with queue_size can cause replica shard failures" }
{ "commits": [ { "message": "Setting index/bulk thread pools with queue_size can cause replica shard failures\ncloses #3526" }, { "message": "fix forwarding the array to the queue" }, { "message": "rename runnable" }, { "message": "add assert on creation" } ], "files": [ { "diff": "@@ -40,6 +40,7 @@\n import org.elasticsearch.common.io.stream.Streamable;\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.TimeValue;\n+import org.elasticsearch.common.util.concurrent.AbstractRunnable;\n import org.elasticsearch.index.engine.DocumentAlreadyExistsException;\n import org.elasticsearch.index.engine.VersionConflictEngineException;\n import org.elasticsearch.indices.IndicesService;\n@@ -695,7 +696,7 @@ private void finishIfPossible() {\n if (request.operationThreaded()) {\n request.beforeLocalFork();\n try {\n- threadPool.executor(executor).execute(new Runnable() {\n+ threadPool.executor(executor).execute(new AbstractRunnable() {\n @Override\n public void run() {\n try {\n@@ -710,6 +711,12 @@ public void run() {\n listener.onResponse(response.response());\n }\n }\n+\n+ // we must never reject on because of thread pool capacity on replicas\n+ @Override\n+ public boolean isForceExecution() {\n+ return true;\n+ }\n });\n } catch (Throwable e) {\n if (!ignoreReplicaException(e)) {", "filename": "src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java", "status": "modified" }, { "diff": "@@ -0,0 +1,33 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. 
See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.common.util.concurrent;\n+\n+/**\n+ * An extension to runnable.\n+ */\n+public abstract class AbstractRunnable implements Runnable {\n+\n+ /**\n+ * Should the runnable force its execution in case it gets rejected?\n+ */\n+ public boolean isForceExecution() {\n+ return false;\n+ }\n+}", "filename": "src/main/java/org/elasticsearch/common/util/concurrent/AbstractRunnable.java", "status": "added" }, { "diff": "@@ -19,8 +19,11 @@\n \n package org.elasticsearch.common.util.concurrent;\n \n+import org.elasticsearch.ElasticSearchIllegalStateException;\n+import org.elasticsearch.ElasticSearchInterruptedException;\n import org.elasticsearch.common.metrics.CounterMetric;\n \n+import java.util.concurrent.BlockingQueue;\n import java.util.concurrent.ThreadPoolExecutor;\n \n /**\n@@ -31,6 +34,20 @@ public class EsAbortPolicy implements XRejectedExecutionHandler {\n \n @Override\n public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {\n+ if (r instanceof AbstractRunnable) {\n+ if (((AbstractRunnable) r).isForceExecution()) {\n+ BlockingQueue<Runnable> queue = executor.getQueue();\n+ if (!(queue instanceof SizeBlockingQueue)) {\n+ throw new ElasticSearchIllegalStateException(\"forced execution, but expected a size queue\");\n+ }\n+ try {\n+ ((SizeBlockingQueue) queue).forcePut(r);\n+ } catch (InterruptedException e) {\n+ throw new ElasticSearchInterruptedException(e.getMessage(), e);\n+ }\n+ return;\n+ }\n+ }\n rejected.inc();\n throw new EsRejectedExecutionException(\"rejected execution of [\" + r.getClass().getName() + \"]\");\n }", "filename": "src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java", "status": "modified" }, { "diff": "@@ -57,14 +57,16 @@ public static EsThreadPoolExecutor newCached(long keepAliveTime, TimeUnit unit,\n return new EsThreadPoolExecutor(0, Integer.MAX_VALUE, keepAliveTime, unit, new SynchronousQueue<Runnable>(), threadFactory, new EsAbortPolicy());\n }\n \n- public static EsThreadPoolExecutor newFixed(int size, BlockingQueue<Runnable> queue, ThreadFactory threadFactory) {\n+ public static EsThreadPoolExecutor newFixed(int size, int queueCapacity, ThreadFactory threadFactory) {\n+ BlockingQueue<Runnable> queue;\n+ if (queueCapacity < 0) {\n+ queue = ConcurrentCollections.newBlockingQueue();\n+ } else {\n+ queue = new SizeBlockingQueue<Runnable>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);\n+ }\n return new EsThreadPoolExecutor(size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy());\n }\n \n- public static EsThreadPoolExecutor newFixed(int size, BlockingQueue<Runnable> queue, ThreadFactory threadFactory, XRejectedExecutionHandler rejectedExecutionHandler) {\n- return new EsThreadPoolExecutor(size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, rejectedExecutionHandler);\n- }\n-\n public static String threadName(Settings settings, String namePrefix) {\n String name = settings.get(\"name\");\n if (name == null) {\n@@ -111,6 +113,7 @@ public Thread newThread(Runnable r) {\n private EsExecutors() {\n }\n \n+\n static class ExecutorScalingQueue<E> extends LinkedTransferQueue<E> {\n \n ThreadPoolExecutor executor;", "filename": "src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java", "status": "modified" }, { "diff": "@@ -0,0 +1,204 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license 
agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.common.util.concurrent;\n+\n+import org.elasticsearch.ElasticSearchIllegalStateException;\n+\n+import java.util.AbstractQueue;\n+import java.util.Collection;\n+import java.util.Iterator;\n+import java.util.concurrent.BlockingQueue;\n+import java.util.concurrent.TimeUnit;\n+import java.util.concurrent.atomic.AtomicInteger;\n+\n+/**\n+ * A size based queue wrapping another blocking queue to provide (somewhat relaxed) capacity checks.\n+ * Mainly makes sense to use with blocking queues that are unbounded to provide the ability to do\n+ * capacity verification.\n+ */\n+public class SizeBlockingQueue<E> extends AbstractQueue<E> implements BlockingQueue<E> {\n+\n+ private final BlockingQueue<E> queue;\n+ private final int capacity;\n+\n+ private final AtomicInteger size = new AtomicInteger();\n+\n+ public SizeBlockingQueue(BlockingQueue<E> queue, int capacity) {\n+ assert capacity >= 0;\n+ this.queue = queue;\n+ this.capacity = capacity;\n+ }\n+\n+ @Override\n+ public int size() {\n+ return size.get();\n+ }\n+\n+ @Override\n+ public Iterator<E> iterator() {\n+ final Iterator<E> it = queue.iterator();\n+ return new Iterator<E>() {\n+ E current;\n+\n+ @Override\n+ public boolean hasNext() {\n+ return it.hasNext();\n+ }\n+\n+ @Override\n+ public E next() {\n+ current = it.next();\n+ return current;\n+ }\n+\n+ @Override\n+ public void remove() {\n+ // note, we can't call #remove on the iterator because we need to know\n+ // if it was removed or not\n+ if (queue.remove(current)) {\n+ size.decrementAndGet();\n+ }\n+ }\n+ };\n+ }\n+\n+ @Override\n+ public E peek() {\n+ return queue.peek();\n+ }\n+\n+ @Override\n+ public E poll() {\n+ E e = queue.poll();\n+ if (e != null) {\n+ size.decrementAndGet();\n+ }\n+ return e;\n+ }\n+\n+ @Override\n+ public E poll(long timeout, TimeUnit unit) throws InterruptedException {\n+ E e = queue.poll(timeout, unit);\n+ if (e != null) {\n+ size.decrementAndGet();\n+ }\n+ return e;\n+ }\n+\n+ @Override\n+ public boolean remove(Object o) {\n+ boolean v = queue.remove(o);\n+ if (v) {\n+ size.decrementAndGet();\n+ }\n+ return v;\n+ }\n+\n+ /**\n+ * Forces adding an element to the queue, without doing size checks.\n+ */\n+ public void forcePut(E e) throws InterruptedException {\n+ size.incrementAndGet();\n+ try {\n+ queue.put(e);\n+ } catch (InterruptedException ie) {\n+ size.decrementAndGet();\n+ throw ie;\n+ }\n+ }\n+\n+\n+ @Override\n+ public boolean offer(E e) {\n+ int count = size.incrementAndGet();\n+ if (count > capacity) {\n+ size.decrementAndGet();\n+ return false;\n+ }\n+ boolean offered = queue.offer(e);\n+ if (!offered) {\n+ size.decrementAndGet();\n+ }\n+ return offered;\n+ }\n+\n+ @Override\n+ public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException {\n+ // note, not used in 
ThreadPoolExecutor\n+ throw new ElasticSearchIllegalStateException(\"offer with timeout not allowed on size queue\");\n+ }\n+\n+ @Override\n+ public void put(E e) throws InterruptedException {\n+ // note, not used in ThreadPoolExecutor\n+ throw new ElasticSearchIllegalStateException(\"put not allowed on size queue\");\n+ }\n+\n+ @Override\n+ public E take() throws InterruptedException {\n+ E e;\n+ try {\n+ e = queue.take();\n+ size.decrementAndGet();\n+ } catch (InterruptedException ie) {\n+ throw ie;\n+ }\n+ return e;\n+ }\n+\n+ @Override\n+ public int remainingCapacity() {\n+ return capacity - size.get();\n+ }\n+\n+ @Override\n+ public int drainTo(Collection<? super E> c) {\n+ int v = queue.drainTo(c);\n+ size.addAndGet(-v);\n+ return v;\n+ }\n+\n+ @Override\n+ public int drainTo(Collection<? super E> c, int maxElements) {\n+ int v = queue.drainTo(c, maxElements);\n+ size.addAndGet(-v);\n+ return v;\n+ }\n+\n+ @Override\n+ public Object[] toArray() {\n+ return queue.toArray();\n+ }\n+\n+ @Override\n+ public <T> T[] toArray(T[] a) {\n+ return (T[]) queue.toArray(a);\n+ }\n+\n+ @Override\n+ public boolean contains(Object o) {\n+ return queue.contains(o);\n+ }\n+\n+ @Override\n+ public boolean containsAll(Collection<?> c) {\n+ return queue.containsAll(c);\n+ }\n+}", "filename": "src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java", "status": "added" }, { "diff": "@@ -35,7 +35,6 @@\n import org.elasticsearch.common.settings.Settings;\n import org.elasticsearch.common.unit.SizeValue;\n import org.elasticsearch.common.unit.TimeValue;\n-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;\n import org.elasticsearch.common.util.concurrent.EsExecutors;\n import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;\n import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler;\n@@ -298,19 +297,17 @@ private ExecutorHolder rebuild(String name, ExecutorHolder previousExecutorHolde\n } else if (\"fixed\".equals(type)) {\n int defaultSize = defaultSettings.getAsInt(\"size\", EsExecutors.boundedNumberOfProcessors());\n SizeValue defaultQueueSize = defaultSettings.getAsSize(\"queue\", defaultSettings.getAsSize(\"queue_size\", null));\n- String defaultQueueType = defaultSettings.get(\"queue_type\", \"linked\");\n \n if (previousExecutorHolder != null) {\n if (\"fixed\".equals(previousInfo.getType())) {\n SizeValue updatedQueueSize = settings.getAsSize(\"capacity\", settings.getAsSize(\"queue\", settings.getAsSize(\"queue_size\", previousInfo.getQueueSize())));\n- String updatedQueueType = settings.get(\"queue_type\", previousInfo.getQueueType());\n- if (Objects.equal(previousInfo.getQueueSize(), updatedQueueSize) && previousInfo.getQueueType().equals(updatedQueueType)) {\n+ if (Objects.equal(previousInfo.getQueueSize(), updatedQueueSize)) {\n int updatedSize = settings.getAsInt(\"size\", previousInfo.getMax());\n if (previousInfo.getMax() != updatedSize) {\n- logger.debug(\"updating thread_pool [{}], type [{}], size [{}], queue_size [{}], queue_type [{}]\", name, type, updatedSize, updatedQueueSize, updatedQueueType);\n+ logger.debug(\"updating thread_pool [{}], type [{}], size [{}], queue_size [{}]\", name, type, updatedSize, updatedQueueSize);\n ((EsThreadPoolExecutor) previousExecutorHolder.executor).setCorePoolSize(updatedSize);\n ((EsThreadPoolExecutor) previousExecutorHolder.executor).setMaximumPoolSize(updatedSize);\n- return new ExecutorHolder(previousExecutorHolder.executor, new Info(name, type, updatedSize, updatedSize, null, 
updatedQueueSize, updatedQueueType));\n+ return new ExecutorHolder(previousExecutorHolder.executor, new Info(name, type, updatedSize, updatedSize, null, updatedQueueSize));\n }\n return previousExecutorHolder;\n }\n@@ -319,18 +316,13 @@ private ExecutorHolder rebuild(String name, ExecutorHolder previousExecutorHolde\n defaultSize = previousInfo.getMax();\n }\n defaultQueueSize = previousInfo.getQueueSize();\n- if (previousInfo.getQueueType() != null) {\n- defaultQueueType = previousInfo.getQueueType();\n- }\n }\n \n int size = settings.getAsInt(\"size\", defaultSize);\n SizeValue queueSize = settings.getAsSize(\"capacity\", settings.getAsSize(\"queue\", settings.getAsSize(\"queue_size\", defaultQueueSize)));\n- String queueType = settings.get(\"queue_type\", defaultQueueType);\n- BlockingQueue<Runnable> workQueue = newQueue(queueSize, queueType);\n- logger.debug(\"creating thread_pool [{}], type [{}], size [{}], queue_size [{}], queue_type [{}]\", name, type, size, queueSize, queueType);\n- Executor executor = EsExecutors.newFixed(size, workQueue, threadFactory);\n- return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize, queueType));\n+ logger.debug(\"creating thread_pool [{}], type [{}], size [{}], queue_size [{}]\", name, type, size, queueSize);\n+ Executor executor = EsExecutors.newFixed(size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);\n+ return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));\n } else if (\"scaling\".equals(type)) {\n TimeValue defaultKeepAlive = defaultSettings.getAsTime(\"keep_alive\", timeValueMinutes(5));\n int defaultMin = defaultSettings.getAsInt(\"min\", 1);\n@@ -403,24 +395,6 @@ public void updateSettings(Settings settings) {\n }\n }\n \n- private BlockingQueue<Runnable> newQueue(SizeValue queueSize, String queueType) {\n- if (queueSize == null) {\n- return ConcurrentCollections.newBlockingQueue();\n- } else if (queueSize.singles() == 0) {\n- return new SynchronousQueue<Runnable>();\n- } else if (queueSize.singles() > 0) {\n- if (\"linked\".equals(queueType)) {\n- return new LinkedBlockingQueue<Runnable>((int) queueSize.singles());\n- } else if (\"array\".equals(queueType)) {\n- return new ArrayBlockingQueue<Runnable>((int) queueSize.singles());\n- } else {\n- throw new ElasticSearchIllegalArgumentException(\"illegal queue_type set to [\" + queueType + \"], should be either linked or array\");\n- }\n- } else { // queueSize.singles() < 0, just treat it as unbounded queue\n- return ConcurrentCollections.newBlockingQueue();\n- }\n- }\n-\n class ExecutorShutdownListener implements EsThreadPoolExecutor.ShutdownListener {\n \n private ExecutorHolder holder;\n@@ -555,7 +529,6 @@ public static class Info implements Streamable, ToXContent {\n private int max;\n private TimeValue keepAlive;\n private SizeValue queueSize;\n- private String queueType;\n \n Info() {\n \n@@ -570,17 +543,12 @@ public Info(String name, String type, int size) {\n }\n \n public Info(String name, String type, int min, int max, @Nullable TimeValue keepAlive, @Nullable SizeValue queueSize) {\n- this(name, type, min, max, keepAlive, queueSize, null);\n- }\n-\n- public Info(String name, String type, int min, int max, @Nullable TimeValue keepAlive, @Nullable SizeValue queueSize, String queueType) {\n this.name = name;\n this.type = type;\n this.min = min;\n this.max = max;\n this.keepAlive = keepAlive;\n this.queueSize = queueSize;\n- this.queueType = queueType;\n }\n \n public String getName() {\n@@ -609,12 +577,6 
@@ public SizeValue getQueueSize() {\n return this.queueSize;\n }\n \n- @Nullable\n- public String getQueueType() {\n- return this.queueType;\n- }\n-\n-\n @Override\n public void readFrom(StreamInput in) throws IOException {\n name = in.readString();\n@@ -629,7 +591,7 @@ public void readFrom(StreamInput in) throws IOException {\n }\n in.readBoolean(); // here to conform with removed waitTime\n in.readBoolean(); // here to conform with removed rejected setting\n- queueType = in.readOptionalString();\n+ in.readBoolean(); // here to conform with queue type\n }\n \n @Override\n@@ -650,9 +612,9 @@ public void writeTo(StreamOutput out) throws IOException {\n out.writeBoolean(true);\n queueSize.writeTo(out);\n }\n- out.writeBoolean(false); // here to conform with remobed waitTime\n+ out.writeBoolean(false); // here to conform with removed waitTime\n out.writeBoolean(false); // here to conform with removed rejected setting\n- out.writeOptionalString(queueType);\n+ out.writeBoolean(false); // here to conform with queue type\n }\n \n @Override\n@@ -671,9 +633,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws\n if (queueSize != null) {\n builder.field(Fields.QUEUE_SIZE, queueSize.toString());\n }\n- if (queueType != null) {\n- builder.field(Fields.QUEUE_TYPE, queueType);\n- }\n builder.endObject();\n return builder;\n }\n@@ -684,7 +643,6 @@ static final class Fields {\n static final XContentBuilderString MAX = new XContentBuilderString(\"max\");\n static final XContentBuilderString KEEP_ALIVE = new XContentBuilderString(\"keep_alive\");\n static final XContentBuilderString QUEUE_SIZE = new XContentBuilderString(\"queue_size\");\n- static final XContentBuilderString QUEUE_TYPE = new XContentBuilderString(\"queue_type\");\n }\n \n }", "filename": "src/main/java/org/elasticsearch/threadpool/ThreadPool.java", "status": "modified" }, { "diff": "@@ -96,15 +96,13 @@ public void run() {\n for (ThreadPool.Info info : nodeInfo.getThreadPool()) {\n if (info.getName().equals(Names.SEARCH)) {\n assertThat(info.getType(), equalTo(\"fixed\"));\n- assertThat(info.getQueueType(), equalTo(\"linked\"));\n found = true;\n break;\n }\n }\n assertThat(found, equalTo(true));\n \n Map<String, Object> poolMap = getPoolSettingsThroughJson(nodeInfo.getThreadPool(), Names.SEARCH);\n- assertThat(poolMap.get(\"queue_type\").toString(), equalTo(\"linked\"));\n }\n }\n ", "filename": "src/test/java/org/elasticsearch/test/integration/threadpool/SimpleThreadPoolTests.java", "status": "modified" }, { "diff": "@@ -20,14 +20,14 @@\n package org.elasticsearch.test.unit.common.util.concurrent;\n \n import com.google.common.base.Predicate;\n-import org.elasticsearch.common.util.concurrent.EsExecutors;\n-import org.elasticsearch.common.util.concurrent.ThreadBarrier;\n+import org.elasticsearch.common.util.concurrent.*;\n import org.elasticsearch.test.integration.ElasticsearchTestCase;\n import org.junit.Test;\n \n import java.util.concurrent.CountDownLatch;\n import java.util.concurrent.ThreadPoolExecutor;\n import java.util.concurrent.TimeUnit;\n+import java.util.concurrent.atomic.AtomicBoolean;\n \n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.lessThan;\n@@ -40,6 +40,119 @@ private TimeUnit randomTimeUnit() {\n return TimeUnit.values()[between(0, TimeUnit.values().length - 1)];\n }\n \n+ @Test\n+ public void testFixedForcedExecution() throws Exception {\n+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory(\"test\"));\n+ final 
CountDownLatch wait = new CountDownLatch(1);\n+\n+ final CountDownLatch exec1Wait = new CountDownLatch(1);\n+ final AtomicBoolean executed1 = new AtomicBoolean();\n+ executor.execute(new Runnable() {\n+ @Override\n+ public void run() {\n+ try {\n+ wait.await();\n+ } catch (InterruptedException e) {\n+ throw new RuntimeException(e);\n+ }\n+ executed1.set(true);\n+ exec1Wait.countDown();\n+ }\n+ });\n+\n+ final CountDownLatch exec2Wait = new CountDownLatch(1);\n+ final AtomicBoolean executed2 = new AtomicBoolean();\n+ executor.execute(new Runnable() {\n+ @Override\n+ public void run() {\n+ executed2.set(true);\n+ exec2Wait.countDown();\n+ }\n+ });\n+\n+ final AtomicBoolean executed3 = new AtomicBoolean();\n+ final CountDownLatch exec3Wait = new CountDownLatch(1);\n+ executor.execute(new AbstractRunnable() {\n+ @Override\n+ public void run() {\n+ executed3.set(true);\n+ exec3Wait.countDown();\n+ }\n+\n+ @Override\n+ public boolean isForceExecution() {\n+ return true;\n+ }\n+ });\n+\n+ wait.countDown();\n+\n+ exec1Wait.await();\n+ exec2Wait.await();\n+ exec3Wait.await();\n+\n+ assertThat(executed1.get(), equalTo(true));\n+ assertThat(executed2.get(), equalTo(true));\n+ assertThat(executed3.get(), equalTo(true));\n+\n+ executor.shutdownNow();\n+ }\n+\n+ @Test\n+ public void testFixedRejected() throws Exception {\n+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory(\"test\"));\n+ final CountDownLatch wait = new CountDownLatch(1);\n+\n+ final CountDownLatch exec1Wait = new CountDownLatch(1);\n+ final AtomicBoolean executed1 = new AtomicBoolean();\n+ executor.execute(new Runnable() {\n+ @Override\n+ public void run() {\n+ try {\n+ wait.await();\n+ } catch (InterruptedException e) {\n+ throw new RuntimeException(e);\n+ }\n+ executed1.set(true);\n+ exec1Wait.countDown();\n+ }\n+ });\n+\n+ final CountDownLatch exec2Wait = new CountDownLatch(1);\n+ final AtomicBoolean executed2 = new AtomicBoolean();\n+ executor.execute(new Runnable() {\n+ @Override\n+ public void run() {\n+ executed2.set(true);\n+ exec2Wait.countDown();\n+ }\n+ });\n+\n+ final AtomicBoolean executed3 = new AtomicBoolean();\n+ try {\n+ executor.execute(new Runnable() {\n+ @Override\n+ public void run() {\n+ executed3.set(true);\n+ }\n+ });\n+ assert false : \"should be rejected...\";\n+ } catch (EsRejectedExecutionException e) {\n+ // all is well\n+ }\n+\n+ wait.countDown();\n+\n+ exec1Wait.await();\n+ exec2Wait.await();\n+\n+ assertThat(executed1.get(), equalTo(true));\n+ assertThat(executed2.get(), equalTo(true));\n+ assertThat(executed3.get(), equalTo(false));\n+\n+ executor.shutdownNow();\n+ }\n+\n @Test\n public void testScaleUp() throws Exception {\n final int min = between(1, 3);", "filename": "src/test/java/org/elasticsearch/test/unit/common/util/concurrent/EsExecutorsTests.java", "status": "modified" }, { "diff": "@@ -26,7 +26,10 @@\n import org.elasticsearch.threadpool.ThreadPool.Names;\n import org.junit.Test;\n \n-import java.util.concurrent.*;\n+import java.util.concurrent.CountDownLatch;\n+import java.util.concurrent.Executor;\n+import java.util.concurrent.ThreadPoolExecutor;\n+import java.util.concurrent.TimeUnit;\n \n import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;\n import static org.hamcrest.MatcherAssert.assertThat;\n@@ -101,7 +104,6 @@ public void testCachedExecutorType() {\n @Test\n public void testFixedExecutorType() {\n ThreadPool threadPool = new ThreadPool(settingsBuilder().put(\"threadpool.search.type\", \"fixed\").build(), null);\n- 
assertThat(info(threadPool, Names.SEARCH).getQueueType(), equalTo(\"linked\"));\n assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));\n \n // Replace with different type\n@@ -151,20 +153,6 @@ public void testFixedExecutorType() {\n threadPool.updateSettings(settingsBuilder()\n .put(\"threadpool.search.queue\", \"500\")\n .build());\n- assertThat(info(threadPool, Names.SEARCH).getQueueType(), equalTo(\"linked\"));\n- assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getQueue(), instanceOf(LinkedBlockingQueue.class));\n-\n- // Set different queue and size type\n- threadPool.updateSettings(settingsBuilder()\n- .put(\"threadpool.search.queue_type\", \"array\")\n- .put(\"threadpool.search.size\", \"12\")\n- .build());\n- // Make sure keep size changed\n- assertThat(info(threadPool, Names.SEARCH).getType(), equalTo(\"fixed\"));\n- assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(12));\n- assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(12));\n- assertThat(info(threadPool, Names.SEARCH).getQueueType(), equalTo(\"array\"));\n- assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getQueue(), instanceOf(ArrayBlockingQueue.class));\n \n threadPool.shutdown();\n }", "filename": "src/test/java/org/elasticsearch/test/unit/threadpool/UpdateThreadPoolSettingsTests.java", "status": "modified" } ] }
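The mechanism above boils down to one hook: a runnable can declare that it must not be rejected, and the abort policy then force-puts it past the queue's capacity check. A condensed sketch of how the replica side uses it follows; the wrapper class and method names are illustrative, and the real code in TransportShardReplicationOperationAction additionally handles shard lookup, failures, and the response listener:

```java
import org.elasticsearch.common.util.concurrent.AbstractRunnable;

import java.util.concurrent.Executor;

// Illustrative wrapper; shown only to highlight the isForceExecution() override.
class ReplicaOperationSketch {

    void performOnReplica(Executor executor, final Runnable replicaOperation) {
        executor.execute(new AbstractRunnable() {
            @Override
            public void run() {
                replicaOperation.run();
            }

            // Replica operations must never be rejected because of queue capacity:
            // the primary has already admitted the request, so dropping it here
            // would fail the replica shard. EsAbortPolicy honours this flag by
            // force-putting the runnable into the SizeBlockingQueue instead of
            // throwing EsRejectedExecutionException.
            @Override
            public boolean isForceExecution() {
                return true;
            }
        });
    }
}
```

Requests arriving at the primary still go through the normal capacity check, which is the stated intent of the fix: `queue_size` keeps protecting the node without ever failing replicas.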
{ "body": "To reproduce:\n- let's create 2 concrete indices `foo` and `foo_2`\n\n``` bash\n$ curl -XPOST 'http://localhost:9200/foo'\n$ curl -XPOST 'http://localhost:9200/foo_2'\n```\n- let's create a routing alias `foo_1` for index `foo` and routing value 1\n\n``` bash\n$ curl -XPOST 'http://localhost:9200/_aliases' -d '\n{\n \"actions\" : [\n {\n \"add\" : {\n \"index\" : \"foo\",\n \"alias\" : \"foo_1\",\n \"routing\" : \"1\"\n }\n }\n ]\n}'\n```\n- let's index 2 docs one in `foo_1` and the other in `foo_2`\n\n``` bash\n$ curl -XPOST 'http://localhost:9200/foo_1/type/1' -d '{\"foo1\":\"bar1\"}'\n$ curl -XPOST 'http://localhost:9200/foo_2/type/2' -d '{\"foo2\":\"bar2\"}'\n```\n- Now this search gives 1 result instead of the 2 I expected\n\n``` bash\n$ curl -XGET 'http://localhost:9200/foo_*/_search' \n```\n", "comments": [ { "body": "I have submitted PR #2683 that should fix it\n", "created_at": "2013-02-24T23:29:24Z" }, { "body": "Ok closing this one, thanks @s1monw \n", "created_at": "2013-03-03T14:18:04Z" }, { "body": "ah cool thanks for closing this\n", "created_at": "2013-03-03T16:33:02Z" } ], "number": 2682, "title": "Bug when searching concrete and routing aliased indices" }
{ "body": "This PR refactors a bit and add a shared method for searchShards and searchShardsCount to avoid divergent behavior in the future (like #2682 and #3268).\n\nIt seems like searchShardsCount is mainly implemented to test if we are hitting 1 shard or more than 1 shards so maybe it could be renamed and optimized for that specific use case.\n", "number": 3530, "review_comments": [], "title": "Share shards computation logic between searchShards and searchShardsCoun..." }
{ "commits": [ { "message": "Share shards computation logic between searchShards and searchShardsCount" } ], "files": [ { "diff": "@@ -112,51 +112,31 @@ public GroupShardsIterator deleteByQueryShards(ClusterState clusterState, String\n \n @Override\n public int searchShardsCount(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) throws IndexMissingException {\n- if (concreteIndices == null || concreteIndices.length == 0) {\n- concreteIndices = clusterState.metaData().concreteAllOpenIndices();\n- }\n- if (routing != null) {\n- HashSet<ShardId> set = new HashSet<ShardId>();\n- for (String index : concreteIndices) {\n- IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index);\n- Set<String> effectiveRouting = routing.get(index);\n- if (effectiveRouting != null) {\n- for (String r : effectiveRouting) {\n- int shardId = shardId(clusterState, index, null, null, r);\n- IndexShardRoutingTable indexShard = indexRouting.shard(shardId);\n- if (indexShard == null) {\n- throw new IndexShardMissingException(new ShardId(index, shardId));\n- }\n- // we might get duplicates, but that's ok, its an estimated count? (we just want to know if its 1 or not)\n- set.add(indexShard.shardId());\n- }\n- } else {\n- for (IndexShardRoutingTable indexShard : indexRouting) {\n- set.add(indexShard.shardId());\n- }\n- }\n- }\n- return set.size();\n- } else {\n- // we use list here since we know we are not going to create duplicates\n- int count = 0;\n- for (String index : concreteIndices) {\n- IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index);\n- count += indexRouting.shards().size();\n+ final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, indices, concreteIndices, routing);\n+ return shards.size();\n+ }\n+\n+ @Override\n+ public GroupShardsIterator searchShards(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) throws IndexMissingException {\n+ final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, indices, concreteIndices, routing);\n+ final Set<ShardIterator> set = new HashSet<ShardIterator>(shards.size());\n+ for (IndexShardRoutingTable shard : shards) {\n+ ShardIterator iterator = preferenceActiveShardIterator(shard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference);\n+ if (iterator != null) {\n+ set.add(iterator);\n }\n- return count;\n }\n+ return new GroupShardsIterator(set);\n }\n \n private static final Map<String, Set<String>> EMPTY_ROUTING = Collections.emptyMap();\n \n- @Override\n- public GroupShardsIterator searchShards(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) throws IndexMissingException {\n+ private Set<IndexShardRoutingTable> computeTargetedShards(ClusterState clusterState, String[] indices, String[] concreteIndices, @Nullable Map<String, Set<String>> routing) throws IndexMissingException {\n if (concreteIndices == null || concreteIndices.length == 0) {\n concreteIndices = clusterState.metaData().concreteAllOpenIndices();\n }\n- routing = routing == null ? EMPTY_ROUTING : routing; // just use an empty map \n- final Set<ShardIterator> set = new HashSet<ShardIterator>();\n+ routing = routing == null ? 
EMPTY_ROUTING : routing; // just use an empty map\n+ final Set<IndexShardRoutingTable> set = new HashSet<IndexShardRoutingTable>();\n // we use set here and not list since we might get duplicates\n for (String index : concreteIndices) {\n final IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index);\n@@ -169,21 +149,15 @@ public GroupShardsIterator searchShards(ClusterState clusterState, String[] indi\n throw new IndexShardMissingException(new ShardId(index, shardId));\n }\n // we might get duplicates, but that's ok, they will override one another\n- ShardIterator iterator = preferenceActiveShardIterator(indexShard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference);\n- if (iterator != null) {\n- set.add(iterator);\n- }\n+ set.add(indexShard);\n }\n } else {\n for (IndexShardRoutingTable indexShard : indexRouting) {\n- ShardIterator iterator = preferenceActiveShardIterator(indexShard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference);\n- if (iterator != null) {\n- set.add(iterator);\n- }\n+ set.add(indexShard);\n }\n }\n }\n- return new GroupShardsIterator(set);\n+ return set;\n }\n \n private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable indexShard, String localNodeId, DiscoveryNodes nodes, @Nullable String preference) {", "filename": "src/main/java/org/elasticsearch/cluster/routing/operation/plain/PlainOperationRouting.java", "status": "modified" } ] }
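The shape of the refactoring is simply "compute the targeted shard set once, derive both answers from it". A schematic sketch under simplified types follows; the real computeTargetedShards also expands to all open indices, applies per-index routing values, and throws IndexShardMissingException for unknown shards:

```java
import java.util.HashSet;
import java.util.Set;

// Schematic only: plain strings stand in for IndexShardRoutingTable, and
// routing/preference handling is omitted.
class SharedShardResolutionSketch {

    int searchShardsCount(String[] concreteIndices) {
        // Same computation as searchShards, so the count cannot diverge.
        return computeTargetedShards(concreteIndices).size();
    }

    Set<String> searchShards(String[] concreteIndices) {
        // Iterators (with preference applied) are built from the very same set.
        return computeTargetedShards(concreteIndices);
    }

    private Set<String> computeTargetedShards(String[] concreteIndices) {
        // Single place that resolves indices and routing; both public entry
        // points above reuse it.
        Set<String> shards = new HashSet<String>();
        for (String index : concreteIndices) {
            shards.add(index + "[0]"); // placeholder for the index's shard tables
        }
        return shards;
    }
}
```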
{ "body": "Scoring in `function_score` and `filters_function_score` queries is potentially inconsistent when using scripts. \n## Brief overview of related classes\n\n`function_score` and `filters_function_score` allow a user to modify the score of a query (referred to as 'subQueryScore' from here on). \n\nIn brief, there are two classes that compute scores based on a query and some function: `FunctionScoreQuery`, which only has one function and `FiltersFunctionScoreQuery` which combines the result of several functions. For both classes, the function can be a `ScriptScoreFunction`.\n\n`ScoreFunction`: Computes a score for a document. The two relevant methods are:\n- `score(docId, subQueryScore)`: computes a new score taking into account the `subQueryScore` and some other properties of a documents.\n- `factor(docId)`: computes a score solely based on properties of the document.\n\n`FunctionScoreQuery.score()` computes:\n\nscore = `subQueryBoost` \\* `ScoreFunction.score(docId, subQueryScore)`\n\n`FiltersFunctionScoreQuery.CustomBoostFactorScorer.score()` computes: \n\nscore = `subQueryScore` \\* `subQueryBoost` \\* `combine(ScoreFunction1.factor(docId), ScoreFunction2.factor(docId),…)`\n\nwhere combine can mean add, multiply, lowest value, etc.\n## The problem\n\n`ScoreFunctions.factor(docId)` implies that the method computes a factor only and does not take into account the `subQueryScore`. This is the way the `ScoreFunctions` are used in `FiltersFunctionScoreQuery.CustomBoostFactorScorer`: The method `factor()` is called for each score function and the result is than later multiplied to the `subQueryScore`.\n\nHowever, the `ScriptScoreFunction` violates this principle: scripts can use the `_score` variable which should be initialized with the `subQueryScore` before the script is run, see `ScriptScoreFunction.score(..)`. If `ScriptScoreFunction.factor()` is called, then the behavior is undefined, since the `_score` variable is either wrong or maybe even not initialized.\n\nThis might cause unexpected behavior since this inconsistency is not transparent to the user.\n", "comments": [ { "body": "Backported function_score (Issue #3423, commits 720b550..a938bd5, 534299a and 8774c46, 32cdddb) to 0.90.\n", "created_at": "2013-08-09T12:44:43Z" }, { "body": "@brwe cool! thanks for recording those commit IDs\n", "created_at": "2013-08-09T12:45:24Z" } ], "number": 3464, "title": "Inconsistent usage of ScriptScoreFunction in FiltersFunctionScoreQuery" }
{ "body": "...ry\n\nThis commit fixes inconsistencies in `function_score` and `filters_function_score`\nusing scripts, see issue #3464\n\nThe method 'ScoreFunction.factor(docId)' is removed completely, since the name\nsuggests that this method actually computes a factor which was not the case.\nMultiplying the computed score is now handled by 'FiltersFunctionScoreQuery'\nand 'FunctionScoreQuery' and not implicitely performed in\n'ScoreFunction.factor(docId, subQueryScore)' as was the case for 'BoostScoreFunction'\nand 'DecayScoreFunctions'.\n\nThis commit also fixes the explain function for FiltersFunctionScoreQuery. Here,\nthe influence of the maxBoost was never printed. Furthermore, the queryBoost was\nprinted as beeing multiplied to the filter score.\n\nCloses #3464\n", "number": 3472, "review_comments": [], "title": "Fix inconsistent usage of ScriptScoreFunction in FiltersFunctionScoreQue..." }
{ "commits": [ { "message": "Fix inconsistent usage of ScriptScoreFunction in FiltersFunctionScoreQuery\n\nThis commit fixes inconsistencies in `function_score` and `filters_function_score`\nusing scripts, see issue #3464\n\nThe method 'ScoreFunction.factor(docId)' is removed completely, since the name\nsuggests that this method actually computes a factor which was not the case.\nMultiplying the computed score is now handled by 'FiltersFunctionScoreQuery'\nand 'FunctionScoreQuery' and not implicitely performed in\n'ScoreFunction.factor(docId, subQueryScore)' as was the case for 'BoostScoreFunction'\nand 'DecayScoreFunctions'.\n\nThis commit also fixes the explain function for FiltersFunctionScoreQuery. Here,\nthe influence of the maxBoost was never printed. Furthermore, the queryBoost was\nprinted as beeing multiplied to the filter score.\n\nCloses #3464" }, { "message": " implement simons comments" }, { "message": "add boost_mode to rest interface\n\nallow user to set combine functions explicitely via boost_mode variable." }, { "message": "make mult default boost mode\n\nalways multiply query score to function score. For script score\nfunctions, this means that boost_mode has to be set to `plain` if\n'function_score' should behave like 'custom_score'" }, { "message": "format code" }, { "message": "make GeoPoint parsable in lat/lon json format" }, { "message": "add more combine functions and rename PLAIN to REPLACE" }, { "message": "rename 'total' to 'avg', both enum and for query" }, { "message": "Add offset to decay function score\n\nDocs within the offset will be scored with 1.0, decay only starts after\noffset is reached." }, { "message": "rename scale_weight -> decay" }, { "message": "rename reference -> origin" }, { "message": "format code" }, { "message": "add builders for nicer java api\n\nCloses #3533" } ], "files": [ { "diff": "@@ -25,11 +25,12 @@\n /**\n *\n */\n-public class BoostScoreFunction implements ScoreFunction {\n+public class BoostScoreFunction extends ScoreFunction {\n \n private final float boost;\n \n public BoostScoreFunction(float boost) {\n+ super(CombineFunction.MULT);\n this.boost = boost;\n }\n \n@@ -41,30 +42,19 @@ public float getBoost() {\n public void setNextReader(AtomicReaderContext context) {\n // nothing to do here...\n }\n-\n+ \n @Override\n public double score(int docId, float subQueryScore) {\n- return subQueryScore * boost;\n- }\n-\n- @Override\n- public double factor(int docId) {\n return boost;\n }\n \n @Override\n public Explanation explainScore(int docId, Explanation subQueryExpl) {\n- Explanation exp = new Explanation(boost * subQueryExpl.getValue(), \"static boost function: product of:\");\n- exp.addDetail(subQueryExpl);\n+ Explanation exp = new Explanation(boost, \"static boost factor\");\n exp.addDetail(new Explanation(boost, \"boostFactor\"));\n return exp;\n }\n \n- @Override\n- public Explanation explainFactor(int docId) {\n- return new Explanation(boost, \"boostFactor\");\n- }\n-\n @Override\n public boolean equals(Object o) {\n if (this == o)", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java", "status": "modified" }, { "diff": "@@ -0,0 +1,202 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. 
ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.common.lucene.search.function;\n+\n+import org.apache.lucene.search.ComplexExplanation;\n+import org.apache.lucene.search.Explanation;\n+\n+public enum CombineFunction {\n+ MULT {\n+ @Override\n+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {\n+ return toFloat(queryBoost * queryScore * Math.min(funcScore, maxBoost));\n+ }\n+\n+ @Override\n+ public String getName() {\n+ return \"mult\";\n+ }\n+\n+ @Override\n+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {\n+ float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost) * queryExpl.getValue();\n+ ComplexExplanation res = new ComplexExplanation(true, score, \"function score, product of:\");\n+ res.addDetail(queryExpl);\n+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), \"Math.min of\");\n+ minExpl.addDetail(funcExpl);\n+ minExpl.addDetail(new Explanation(maxBoost, \"maxBoost\"));\n+ res.addDetail(minExpl);\n+ res.addDetail(new Explanation(queryBoost, \"queryBoost\"));\n+ return res;\n+ }\n+ },\n+ REPLACE {\n+ @Override\n+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {\n+ return toFloat(queryBoost * Math.min(funcScore, maxBoost));\n+ }\n+\n+ @Override\n+ public String getName() {\n+ return \"replace\";\n+ }\n+\n+ @Override\n+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {\n+ float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost);\n+ ComplexExplanation res = new ComplexExplanation(true, score, \"function score, product of:\");\n+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), \"Math.min of\");\n+ minExpl.addDetail(funcExpl);\n+ minExpl.addDetail(new Explanation(maxBoost, \"maxBoost\"));\n+ res.addDetail(minExpl);\n+ res.addDetail(new Explanation(queryBoost, \"queryBoost\"));\n+ return res;\n+ }\n+\n+ },\n+ SUM {\n+ @Override\n+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {\n+ return toFloat(queryBoost * (queryScore + Math.min(funcScore, maxBoost)));\n+ }\n+\n+ @Override\n+ public String getName() {\n+ return \"sum\";\n+ }\n+\n+ @Override\n+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {\n+ float score = queryBoost * (Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue());\n+ ComplexExplanation res = new ComplexExplanation(true, score, \"function score, product of:\");\n+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), \"Math.min of\");\n+ minExpl.addDetail(funcExpl);\n+ minExpl.addDetail(new Explanation(maxBoost, \"maxBoost\"));\n+ ComplexExplanation sumExpl = new 
ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(),\n+ \"sum of\");\n+ sumExpl.addDetail(queryExpl);\n+ sumExpl.addDetail(minExpl);\n+ res.addDetail(sumExpl);\n+ res.addDetail(new Explanation(queryBoost, \"queryBoost\"));\n+ return res;\n+ }\n+\n+ },\n+ AVG {\n+ @Override\n+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {\n+ return toFloat((queryBoost * (Math.min(funcScore, maxBoost) + queryScore) / 2.0));\n+ }\n+\n+ @Override\n+ public String getName() {\n+ return \"avg\";\n+ }\n+\n+ @Override\n+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {\n+ float score = toFloat(queryBoost * (queryExpl.getValue() + Math.min(funcExpl.getValue(), maxBoost)) / 2.0);\n+ ComplexExplanation res = new ComplexExplanation(true, score, \"function score, product of:\");\n+ ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), \"Math.min of\");\n+ minExpl.addDetail(funcExpl);\n+ minExpl.addDetail(new Explanation(maxBoost, \"maxBoost\"));\n+ ComplexExplanation avgExpl = new ComplexExplanation(true,\n+ toFloat((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), \"avg of\");\n+ avgExpl.addDetail(queryExpl);\n+ avgExpl.addDetail(minExpl);\n+ res.addDetail(avgExpl);\n+ res.addDetail(new Explanation(queryBoost, \"queryBoost\"));\n+ return res;\n+ }\n+\n+ },\n+ MIN {\n+ @Override\n+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {\n+ return toFloat(queryBoost * Math.min(queryScore, Math.min(funcScore, maxBoost)));\n+ }\n+\n+ @Override\n+ public String getName() {\n+ return \"min\";\n+ }\n+\n+ @Override\n+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {\n+ float score = toFloat(queryBoost * Math.min(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost)));\n+ ComplexExplanation res = new ComplexExplanation(true, score, \"function score, product of:\");\n+ ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), \"Math.min of\");\n+ innerMinExpl.addDetail(funcExpl);\n+ innerMinExpl.addDetail(new Explanation(maxBoost, \"maxBoost\"));\n+ ComplexExplanation outerMinExpl = new ComplexExplanation(true, Math.min(Math.min(funcExpl.getValue(), maxBoost),\n+ queryExpl.getValue()), \"min of\");\n+ outerMinExpl.addDetail(queryExpl);\n+ outerMinExpl.addDetail(innerMinExpl);\n+ res.addDetail(outerMinExpl);\n+ res.addDetail(new Explanation(queryBoost, \"queryBoost\"));\n+ return res;\n+ }\n+\n+ },\n+ MAX {\n+ @Override\n+ public float combine(double queryBoost, double queryScore, double funcScore, double maxBoost) {\n+ return toFloat(queryBoost * (Math.max(queryScore, Math.min(funcScore, maxBoost))));\n+ }\n+\n+ @Override\n+ public String getName() {\n+ return \"max\";\n+ }\n+\n+ @Override\n+ public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) {\n+ float score = toFloat(queryBoost * Math.max(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost)));\n+ ComplexExplanation res = new ComplexExplanation(true, score, \"function score, product of:\");\n+ ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), \"Math.min of\");\n+ innerMinExpl.addDetail(funcExpl);\n+ innerMinExpl.addDetail(new Explanation(maxBoost, \"maxBoost\"));\n+ 
ComplexExplanation outerMaxExpl = new ComplexExplanation(true, Math.max(Math.min(funcExpl.getValue(), maxBoost),\n+ queryExpl.getValue()), \"max of\");\n+ outerMaxExpl.addDetail(queryExpl);\n+ outerMaxExpl.addDetail(innerMinExpl);\n+ res.addDetail(outerMaxExpl);\n+ res.addDetail(new Explanation(queryBoost, \"queryBoost\"));\n+ return res;\n+ }\n+\n+ };\n+\n+ public abstract float combine(double queryBoost, double queryScore, double funcScore, double maxBoost);\n+\n+ public abstract String getName();\n+\n+ public static float toFloat(double input) {\n+ assert deviation(input) <= 0.001 : \"input \" + input + \" out of float scope for function score deviation: \" + deviation(input);\n+ return (float) input;\n+ }\n+\n+ private static double deviation(double input) { // only with assert!\n+ float floatVersion = (float) input;\n+ return Double.compare(floatVersion, input) == 0 || input == 0.0d ? 0 : 1.d - (floatVersion) / input;\n+ }\n+\n+ public abstract ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost);\n+}", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java", "status": "added" }, { "diff": "@@ -28,14 +28,11 @@\n import org.elasticsearch.common.lucene.docset.DocIdSets;\n \n import java.io.IOException;\n-import java.util.ArrayList;\n-import java.util.Arrays;\n-import java.util.Locale;\n-import java.util.Set;\n+import java.util.*;\n \n /**\n- * A query that allows for a pluggable boost function / filter. If it matches the filter, it will\n- * be boosted by the formula.\n+ * A query that allows for a pluggable boost function / filter. If it matches\n+ * the filter, it will be boosted by the formula.\n */\n public class FiltersFunctionScoreQuery extends Query {\n \n@@ -50,13 +47,17 @@ public FilterFunction(Filter filter, ScoreFunction function) {\n \n @Override\n public boolean equals(Object o) {\n- if (this == o) return true;\n- if (o == null || getClass() != o.getClass()) return false;\n+ if (this == o)\n+ return true;\n+ if (o == null || getClass() != o.getClass())\n+ return false;\n \n FilterFunction that = (FilterFunction) o;\n \n- if (filter != null ? !filter.equals(that.filter) : that.filter != null) return false;\n- if (function != null ? !function.equals(that.function) : that.function != null) return false;\n+ if (filter != null ? !filter.equals(that.filter) : that.filter != null)\n+ return false;\n+ if (function != null ? 
!function.equals(that.function) : that.function != null)\n+ return false;\n \n return true;\n }\n@@ -69,20 +70,29 @@ public int hashCode() {\n }\n }\n \n- public static enum ScoreMode {First, Avg, Max, Total, Min, Multiply}\n+ public static enum ScoreMode {\n+ First, Avg, Max, Sum, Min, Multiply\n+ }\n \n Query subQuery;\n final FilterFunction[] filterFunctions;\n final ScoreMode scoreMode;\n final float maxBoost;\n \n+ protected CombineFunction combineFunction;\n+\n public FiltersFunctionScoreQuery(Query subQuery, ScoreMode scoreMode, FilterFunction[] filterFunctions, float maxBoost) {\n this.subQuery = subQuery;\n this.scoreMode = scoreMode;\n this.filterFunctions = filterFunctions;\n this.maxBoost = maxBoost;\n+ combineFunction = CombineFunction.MULT;\n }\n \n+ public FiltersFunctionScoreQuery setCombineFunction(CombineFunction combineFunction){\n+ this.combineFunction = combineFunction;\n+ return this;\n+ }\n public Query getSubQuery() {\n return subQuery;\n }\n@@ -94,7 +104,8 @@ public FilterFunction[] getFilterFunctions() {\n @Override\n public Query rewrite(IndexReader reader) throws IOException {\n Query newQ = subQuery.rewrite(reader);\n- if (newQ == subQuery) return this;\n+ if (newQ == subQuery)\n+ return this;\n FiltersFunctionScoreQuery bq = (FiltersFunctionScoreQuery) this.clone();\n bq.subQuery = newQ;\n return bq;\n@@ -148,107 +159,88 @@ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, bool\n filterFunction.function.setNextReader(context);\n docSets[i] = DocIdSets.toSafeBits(context.reader(), filterFunction.filter.getDocIdSet(context, acceptDocs));\n }\n- return new CustomBoostFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, docSets);\n+ return new CustomBoostFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, docSets, combineFunction);\n }\n \n @Override\n public Explanation explain(AtomicReaderContext context, int doc) throws IOException {\n+\n Explanation subQueryExpl = subQueryWeight.explain(context, doc);\n if (!subQueryExpl.isMatch()) {\n return subQueryExpl;\n }\n+ // First: Gather explanations for all filters\n+ List<ComplexExplanation> filterExplanations = new ArrayList<ComplexExplanation>();\n+ for (FilterFunction filterFunction : filterFunctions) {\n+ Bits docSet = DocIdSets.toSafeBits(context.reader(),\n+ filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs()));\n+ if (docSet.get(doc)) {\n+ filterFunction.function.setNextReader(context);\n+ Explanation functionExplanation = filterFunction.function.explainScore(doc, subQueryExpl);\n+ double factor = functionExplanation.getValue();\n+ float sc = CombineFunction.toFloat(factor);\n+ ComplexExplanation filterExplanation = new ComplexExplanation(true, sc, \"function score, product of:\");\n+ filterExplanation.addDetail(new Explanation(1.0f, \"match filter: \" + filterFunction.filter.toString()));\n+ filterExplanation.addDetail(functionExplanation);\n+ filterExplanations.add(filterExplanation);\n+ }\n+ }\n+ if (filterExplanations.size() == 0) {\n+ float sc = getBoost() * subQueryExpl.getValue();\n+ Explanation res = new ComplexExplanation(true, sc, \"function score, no filter match, product of:\");\n+ res.addDetail(subQueryExpl);\n+ res.addDetail(new Explanation(getBoost(), \"queryBoost\"));\n+ return res;\n+ }\n \n- if (scoreMode == ScoreMode.First) {\n- for (FilterFunction filterFunction : filterFunctions) {\n- Bits docSet = DocIdSets.toSafeBits(context.reader(), filterFunction.filter.getDocIdSet(context, 
context.reader().getLiveDocs()));\n- if (docSet.get(doc)) {\n- filterFunction.function.setNextReader(context);\n- Explanation functionExplanation = filterFunction.function.explainFactor(doc);\n- double factor = functionExplanation.getValue();\n- if (factor > maxBoost) {\n- factor = maxBoost;\n- }\n- float sc = FunctionScoreQuery.toFloat(getBoost() * factor);\n- Explanation filterExplanation = new ComplexExplanation(true, sc, \"function score, product of:\");\n- filterExplanation.addDetail(new Explanation(1.0f, \"match filter: \" + filterFunction.filter.toString()));\n- filterExplanation.addDetail(functionExplanation);\n- filterExplanation.addDetail(new Explanation(getBoost(), \"queryBoost\"));\n-\n- // top level score = subquery.score * filter.score (this already has the query boost)\n- float topLevelScore = subQueryExpl.getValue() * sc;\n- Explanation topLevel = new ComplexExplanation(true, topLevelScore, \"function score, score mode [\" + scoreMode.toString().toLowerCase(Locale.ROOT) + \"]\");\n- topLevel.addDetail(subQueryExpl);\n- topLevel.addDetail(filterExplanation);\n- return topLevel;\n- }\n+ // Second: Compute the factor that would have been computed by the\n+ // filters\n+ double factor = 1.0;\n+ switch (scoreMode) {\n+ case First:\n+\n+ factor = filterExplanations.get(0).getValue();\n+ break;\n+ case Max:\n+ double maxFactor = Double.NEGATIVE_INFINITY;\n+ for (int i = 0; i < filterExplanations.size(); i++) {\n+ factor = Math.max(filterExplanations.get(i).getValue(), maxFactor);\n+ }\n+ break;\n+ case Min:\n+ double minFactor = Double.POSITIVE_INFINITY;\n+ for (int i = 0; i < filterExplanations.size(); i++) {\n+ factor = Math.min(filterExplanations.get(i).getValue(), minFactor);\n+ }\n+ break;\n+ case Multiply:\n+ for (int i = 0; i < filterExplanations.size(); i++) {\n+ factor *= filterExplanations.get(i).getValue();\n }\n- } else {\n+ break;\n+ default: // Avg / Total\n+ double totalFactor = 0.0f;\n int count = 0;\n- float total = 0;\n- float multiply = 1;\n- double max = Double.NEGATIVE_INFINITY;\n- double min = Double.POSITIVE_INFINITY;\n- ArrayList<Explanation> filtersExplanations = new ArrayList<Explanation>();\n- for (FilterFunction filterFunction : filterFunctions) {\n- Bits docSet = DocIdSets.toSafeBits(context.reader(), filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs()));\n- if (docSet.get(doc)) {\n- filterFunction.function.setNextReader(context);\n- Explanation functionExplanation = filterFunction.function.explainFactor(doc);\n- double factor = functionExplanation.getValue();\n- count++;\n- total += factor;\n- multiply *= factor;\n- max = Math.max(factor, max);\n- min = Math.min(factor, min);\n- Explanation res = new ComplexExplanation(true, FunctionScoreQuery.toFloat(factor), \"function score, product of:\");\n- res.addDetail(new Explanation(1.0f, \"match filter: \" + filterFunction.filter.toString()));\n- res.addDetail(functionExplanation);\n- res.addDetail(new Explanation(getBoost(), \"queryBoost\"));\n- filtersExplanations.add(res);\n- }\n+ for (int i = 0; i < filterExplanations.size(); i++) {\n+ totalFactor += filterExplanations.get(i).getValue();\n+ count++;\n }\n- if (count > 0) {\n- double factor = 0;\n- switch (scoreMode) {\n- case Avg:\n- factor = total / count;\n- break;\n- case Max:\n- factor = max;\n- break;\n- case Min:\n- factor = min;\n- break;\n- case Total:\n- factor = total;\n- break;\n- case Multiply:\n- factor = multiply;\n- break;\n- }\n-\n- if (factor > maxBoost) {\n- factor = maxBoost;\n- }\n- float sc = 
FunctionScoreQuery.toFloat(factor * subQueryExpl.getValue() * getBoost());\n- Explanation res = new ComplexExplanation(true, sc, \"function score, score mode [\" + scoreMode.toString().toLowerCase(Locale.ROOT) + \"]\");\n- res.addDetail(subQueryExpl);\n- for (Explanation explanation : filtersExplanations) {\n- res.addDetail(explanation);\n+ if (count != 0) {\n+ factor = totalFactor;\n+ if (scoreMode == ScoreMode.Avg) {\n+ factor /= count;\n }\n- return res;\n }\n }\n-\n- float sc = getBoost() * subQueryExpl.getValue();\n- Explanation res = new ComplexExplanation(true, sc, \"custom score, no filter match, product of:\");\n- res.addDetail(subQueryExpl);\n- res.addDetail(new Explanation(getBoost(), \"queryBoost\"));\n- return res;\n+ ComplexExplanation factorExplanaition = new ComplexExplanation(true, CombineFunction.toFloat(factor),\n+ \"function score, score mode [\" + scoreMode.toString().toLowerCase(Locale.ROOT) + \"]\");\n+ for (int i = 0; i < filterExplanations.size(); i++) {\n+ factorExplanaition.addDetail(filterExplanations.get(i));\n+ }\n+ return combineFunction.explain(getBoost(), subQueryExpl, factorExplanaition, maxBoost);\n }\n }\n \n-\n static class CustomBoostFactorScorer extends Scorer {\n \n private final float subQueryBoost;\n@@ -257,16 +249,18 @@ static class CustomBoostFactorScorer extends Scorer {\n private final ScoreMode scoreMode;\n private final float maxBoost;\n private final Bits[] docSets;\n+ private final CombineFunction scoreCombiner;\n \n- private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreMode scoreMode,\n- FilterFunction[] filterFunctions, float maxBoost, Bits[] docSets) throws IOException {\n+ private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreMode scoreMode, FilterFunction[] filterFunctions,\n+ float maxBoost, Bits[] docSets, CombineFunction scoreCombiner) throws IOException {\n super(w);\n this.subQueryBoost = w.getQuery().getBoost();\n this.scorer = scorer;\n this.scoreMode = scoreMode;\n this.filterFunctions = filterFunctions;\n this.maxBoost = maxBoost;\n this.docSets = docSets;\n+ this.scoreCombiner = scoreCombiner;\n }\n \n @Override\n@@ -288,18 +282,19 @@ public int nextDoc() throws IOException {\n public float score() throws IOException {\n int docId = scorer.docID();\n double factor = 1.0f;\n+ float subQueryScore = scorer.score();\n if (scoreMode == ScoreMode.First) {\n for (int i = 0; i < filterFunctions.length; i++) {\n if (docSets[i].get(docId)) {\n- factor = filterFunctions[i].function.factor(docId);\n+ factor = filterFunctions[i].function.score(docId, subQueryScore);\n break;\n }\n }\n } else if (scoreMode == ScoreMode.Max) {\n double maxFactor = Double.NEGATIVE_INFINITY;\n for (int i = 0; i < filterFunctions.length; i++) {\n if (docSets[i].get(docId)) {\n- maxFactor = Math.max(filterFunctions[i].function.factor(docId), maxFactor);\n+ maxFactor = Math.max(filterFunctions[i].function.score(docId, subQueryScore), maxFactor);\n }\n }\n if (maxFactor != Float.NEGATIVE_INFINITY) {\n@@ -309,7 +304,7 @@ public float score() throws IOException {\n double minFactor = Double.POSITIVE_INFINITY;\n for (int i = 0; i < filterFunctions.length; i++) {\n if (docSets[i].get(docId)) {\n- minFactor = Math.min(filterFunctions[i].function.factor(docId), minFactor);\n+ minFactor = Math.min(filterFunctions[i].function.score(docId, subQueryScore), minFactor);\n }\n }\n if (minFactor != Float.POSITIVE_INFINITY) {\n@@ -318,15 +313,15 @@ public float score() throws IOException {\n } else if (scoreMode == 
ScoreMode.Multiply) {\n for (int i = 0; i < filterFunctions.length; i++) {\n if (docSets[i].get(docId)) {\n- factor *= filterFunctions[i].function.factor(docId);\n+ factor *= filterFunctions[i].function.score(docId, subQueryScore);\n }\n }\n } else { // Avg / Total\n double totalFactor = 0.0f;\n int count = 0;\n for (int i = 0; i < filterFunctions.length; i++) {\n if (docSets[i].get(docId)) {\n- totalFactor += filterFunctions[i].function.factor(docId);\n+ totalFactor += filterFunctions[i].function.score(docId, subQueryScore);\n count++;\n }\n }\n@@ -337,11 +332,7 @@ public float score() throws IOException {\n }\n }\n }\n- if (factor > maxBoost) {\n- factor = maxBoost;\n- }\n- float score = scorer.score();\n- return FunctionScoreQuery.toFloat(subQueryBoost * score * factor);\n+ return scoreCombiner.combine(subQueryBoost, subQueryScore, factor, maxBoost);\n }\n \n @Override\n@@ -355,10 +346,9 @@ public long cost() {\n }\n }\n \n-\n public String toString(String field) {\n StringBuilder sb = new StringBuilder();\n- sb.append(\"custom score (\").append(subQuery.toString(field)).append(\", functions: [\");\n+ sb.append(\"function score (\").append(subQuery.toString(field)).append(\", functions: [\");\n for (FilterFunction filterFunction : filterFunctions) {\n sb.append(\"{filter(\").append(filterFunction.filter).append(\"), function [\").append(filterFunction.function).append(\"]}\");\n }\n@@ -368,7 +358,8 @@ public String toString(String field) {\n }\n \n public boolean equals(Object o) {\n- if (getClass() != o.getClass()) return false;\n+ if (getClass() != o.getClass())\n+ return false;\n FiltersFunctionScoreQuery other = (FiltersFunctionScoreQuery) o;\n if (this.getBoost() != other.getBoost())\n return false;\n@@ -382,4 +373,3 @@ public int hashCode() {\n return subQuery.hashCode() + 31 * Arrays.hashCode(filterFunctions) ^ Float.floatToIntBits(getBoost());\n }\n }\n-", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java", "status": "modified" }, { "diff": "@@ -37,12 +37,18 @@ public class FunctionScoreQuery extends Query {\n Query subQuery;\n final ScoreFunction function;\n float maxBoost = Float.MAX_VALUE;\n-\n+ CombineFunction combineFunction;\n+ \n public FunctionScoreQuery(Query subQuery, ScoreFunction function) {\n this.subQuery = subQuery;\n this.function = function;\n+ this.combineFunction = function.getDefaultScoreCombiner();\n }\n \n+ public void setCombineFunction(CombineFunction combineFunction) {\n+ this.combineFunction = combineFunction;\n+ }\n+ \n public void setMaxBoost(float maxBoost) {\n this.maxBoost = maxBoost;\n }\n@@ -112,7 +118,7 @@ public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, bool\n return null;\n }\n function.setNextReader(context);\n- return new CustomBoostFactorScorer(this, subQueryScorer, function, maxBoost);\n+ return new CustomBoostFactorScorer(this, subQueryScorer, function, maxBoost, combineFunction);\n }\n \n @Override\n@@ -121,14 +127,9 @@ public Explanation explain(AtomicReaderContext context, int doc) throws IOExcept\n if (!subQueryExpl.isMatch()) {\n return subQueryExpl;\n }\n-\n function.setNextReader(context);\n Explanation functionExplanation = function.explainScore(doc, subQueryExpl);\n- float sc = getBoost() * functionExplanation.getValue();\n- Explanation res = new ComplexExplanation(true, sc, \"function score, product of:\");\n- res.addDetail(functionExplanation);\n- res.addDetail(new Explanation(getBoost(), \"queryBoost\"));\n- return res;\n+ return 
combineFunction.explain(getBoost(), subQueryExpl, functionExplanation, maxBoost);\n }\n }\n \n@@ -138,14 +139,16 @@ static class CustomBoostFactorScorer extends Scorer {\n private final Scorer scorer;\n private final ScoreFunction function;\n private final float maxBoost;\n+ private final CombineFunction scoreCombiner;\n \n- private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreFunction function, float maxBoost)\n+ private CustomBoostFactorScorer(CustomBoostFactorWeight w, Scorer scorer, ScoreFunction function, float maxBoost, CombineFunction scoreCombiner)\n throws IOException {\n super(w);\n this.subQueryBoost = w.getQuery().getBoost();\n this.scorer = scorer;\n this.function = function;\n this.maxBoost = maxBoost;\n+ this.scoreCombiner = scoreCombiner;\n }\n \n @Override\n@@ -165,8 +168,8 @@ public int nextDoc() throws IOException {\n \n @Override\n public float score() throws IOException {\n- double factor = function.score(scorer.docID(), scorer.score());\n- return toFloat(subQueryBoost * Math.min(maxBoost, factor));\n+ return scoreCombiner.combine(subQueryBoost, scorer.score(),\n+ function.score(scorer.docID(), scorer.score()), maxBoost);\n }\n \n @Override\n@@ -182,7 +185,7 @@ public long cost() {\n \n public String toString(String field) {\n StringBuilder sb = new StringBuilder();\n- sb.append(\"custom score (\").append(subQuery.toString(field)).append(\",function=\").append(function).append(')');\n+ sb.append(\"function score (\").append(subQuery.toString(field)).append(\",function=\").append(function).append(')');\n sb.append(ToStringUtils.boost(getBoost()));\n return sb.toString();\n }\n@@ -198,15 +201,4 @@ public boolean equals(Object o) {\n public int hashCode() {\n return subQuery.hashCode() + 31 * function.hashCode() ^ Float.floatToIntBits(getBoost());\n }\n-\n- public static float toFloat(double input) {\n- assert deviation(input) <= 0.001 : \"input \" + input + \" out of float scope for function score deviation: \" + deviation(input);\n- return (float) input;\n- }\n- \n- private static double deviation(double input) { // only with assert!\n- float floatVersion = (float)input;\n- return Double.compare(floatVersion, input) == 0 || input == 0.0d ? 
0 : 1.d-(floatVersion) / input;\n- }\n-\n }", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java", "status": "modified" }, { "diff": "@@ -25,12 +25,13 @@\n /**\n *\n */\n-public class RandomScoreFunction implements ScoreFunction {\n+public class RandomScoreFunction extends ScoreFunction {\n \n private final PRNG prng;\n private int docBase;\n \n public RandomScoreFunction(long seed) {\n+ super(CombineFunction.MULT);\n this.prng = new PRNG(seed);\n }\n \n@@ -44,26 +45,14 @@ public double score(int docId, float subQueryScore) {\n return prng.random(docBase + docId);\n }\n \n- @Override\n- public double factor(int docId) {\n- return prng.seed;\n- }\n-\n @Override\n public Explanation explainScore(int docId, Explanation subQueryExpl) {\n Explanation exp = new Explanation();\n exp.setDescription(\"random score function (seed: \" + prng.originalSeed + \")\");\n exp.addDetail(subQueryExpl);\n return exp;\n }\n-\n- @Override\n- public Explanation explainFactor(int docId) {\n- Explanation exp = new Explanation();\n- exp.setDescription(\"seed: \" + prng.originalSeed + \")\");\n- return exp;\n- }\n-\n+ \n /**\n * Algorithm based on {@link java.util.Random} except this one is not\n * thread safe", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java", "status": "modified" }, { "diff": "@@ -25,15 +25,22 @@\n /**\n *\n */\n-public interface ScoreFunction {\n+public abstract class ScoreFunction {\n \n- void setNextReader(AtomicReaderContext context);\n+ private final CombineFunction scoreCombiner;\n+ \n+ public abstract void setNextReader(AtomicReaderContext context);\n \n- double score(int docId, float subQueryScore);\n+ public abstract double score(int docId, float subQueryScore);\n \n- double factor(int docId);\n+ public abstract Explanation explainScore(int docId, Explanation subQueryExpl);\n \n- Explanation explainScore(int docId, Explanation subQueryExpl);\n+ public CombineFunction getDefaultScoreCombiner() {\n+ return scoreCombiner;\n+ }\n+\n+ protected ScoreFunction(CombineFunction scoreCombiner) {\n+ this.scoreCombiner = scoreCombiner;\n+ }\n \n- Explanation explainFactor(int docId);\n }", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/ScoreFunction.java", "status": "modified" }, { "diff": "@@ -26,15 +26,17 @@\n \n import java.util.Map;\n \n-public class ScriptScoreFunction implements ScoreFunction {\n+public class ScriptScoreFunction extends ScoreFunction {\n \n private final String sScript;\n \n private final Map<String, Object> params;\n \n private final SearchScript script;\n+ \n \n public ScriptScoreFunction(String sScript, Map<String, Object> params, SearchScript script) {\n+ super(CombineFunction.REPLACE);\n this.sScript = sScript;\n this.params = params;\n this.script = script;\n@@ -52,13 +54,6 @@ public double score(int docId, float subQueryScore) {\n return script.runAsDouble();\n }\n \n- @Override\n- public double factor(int docId) {\n- // just the factor, so don't provide _score\n- script.setNextDocId(docId);\n- return script.runAsFloat();\n- }\n-\n @Override\n public Explanation explainScore(int docId, Explanation subQueryExpl) {\n Explanation exp;\n@@ -68,19 +63,15 @@ public Explanation explainScore(int docId, Explanation subQueryExpl) {\n exp = ((ExplainableSearchScript) script).explain(subQueryExpl);\n } else {\n double score = score(docId, subQueryExpl.getValue());\n- exp = new Explanation((float)score, \"script score function: composed of:\");\n+ exp = new 
Explanation(CombineFunction.toFloat(score), \"script score function: composed of:\");\n exp.addDetail(subQueryExpl);\n }\n return exp;\n }\n \n- @Override\n- public Explanation explainFactor(int docId) {\n- return new Explanation((float)factor(docId), \"script_factor\");\n- }\n-\n @Override\n public String toString() {\n return \"script[\" + sScript + \"], params [\" + params + \"]\";\n }\n+\n }\n\\ No newline at end of file", "filename": "src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java", "status": "modified" }, { "diff": "@@ -24,6 +24,7 @@\n import org.elasticsearch.common.Strings;\n import org.elasticsearch.common.bytes.BytesArray;\n import org.elasticsearch.common.bytes.BytesReference;\n+import org.elasticsearch.common.geo.GeoPoint;\n import org.elasticsearch.common.io.BytesStream;\n import org.elasticsearch.common.io.stream.BytesStreamOutput;\n import org.elasticsearch.common.text.Text;\n@@ -1117,6 +1118,11 @@ private void writeValue(Object value) throws IOException {\n generator.writeNumber(((Short) value).shortValue());\n } else if (type == Boolean.class) {\n generator.writeBoolean(((Boolean) value).booleanValue());\n+ } else if (type == GeoPoint.class) {\n+ generator.writeStartObject();\n+ generator.writeNumberField(\"lat\", ((GeoPoint) value).lat());\n+ generator.writeNumberField(\"lon\", ((GeoPoint) value).lon());\n+ generator.writeEndObject();\n } else if (value instanceof Map) {\n writeMap((Map) value);\n } else if (value instanceof Iterable) {", "filename": "src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java", "status": "modified" }, { "diff": "@@ -140,7 +140,7 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n } else if (\"min\".equals(sScoreMode)) {\n scoreMode = FiltersFunctionScoreQuery.ScoreMode.Min;\n } else if (\"total\".equals(sScoreMode)) {\n- scoreMode = FiltersFunctionScoreQuery.ScoreMode.Total;\n+ scoreMode = FiltersFunctionScoreQuery.ScoreMode.Sum;\n } else if (\"multiply\".equals(sScoreMode)) {\n scoreMode = FiltersFunctionScoreQuery.ScoreMode.Multiply;\n } else if (\"first\".equals(sScoreMode)) {", "filename": "src/main/java/org/elasticsearch/index/query/CustomFiltersScoreQueryParser.java", "status": "modified" }, { "diff": "@@ -22,6 +22,7 @@\n import org.elasticsearch.common.Nullable;\n import org.elasticsearch.common.geo.builders.ShapeBuilder;\n import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;\n+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;\n \n import java.util.Collection;\n \n@@ -549,12 +550,30 @@ public static CustomFiltersScoreQueryBuilder customFiltersScoreQuery(QueryBuilde\n public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder) {\n return new FunctionScoreQueryBuilder(queryBuilder);\n }\n+ /**\n+ * A query that allows to define a custom scoring function.\n+ *\n+ * @param queryBuilder The query to custom score\n+ * @param scoreFunctionBuilder The score function used to re-score the query\n+ */\n+ public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, ScoreFunctionBuilder function) {\n+ return (new FunctionScoreQueryBuilder(queryBuilder)).add(function);\n+ }\n \n /**\n * A query that allows to define a custom scoring function.\n *\n * @param filterBuilder The query to custom score\n */\n+ public static FunctionScoreQueryBuilder functionScoreQuery(FilterBuilder filterBuilder, ScoreFunctionBuilder function) {\n+ return (new 
FunctionScoreQueryBuilder(filterBuilder)).add(function);\n+ }\n+ \n+ /**\n+ * A query that allows to define a custom scoring function.\n+ *\n+ * @param filterBuilder The filterBuilder to custom score\n+ */\n public static FunctionScoreQueryBuilder functionScoreQuery(FilterBuilder filterBuilder) {\n return new FunctionScoreQueryBuilder(filterBuilder);\n }", "filename": "src/main/java/org/elasticsearch/index/query/QueryBuilders.java", "status": "modified" }, { "diff": "@@ -45,10 +45,10 @@ public interface DecayFunction {\n * \n * @param scale\n * the raw scale value given by the user\n- * @param value\n+ * @param decay\n * the value which decay function should take once the distance\n * reaches this scale\n * */\n- public double processScale(double scale, double value);\n+ public double processScale(double scale, double decay);\n \n }", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/DecayFunction.java", "status": "modified" }, { "diff": "@@ -26,36 +26,47 @@\n \n public abstract class DecayFunctionBuilder implements ScoreFunctionBuilder {\n \n- protected static final String REFERNECE = \"reference\";\n+ protected static final String ORIGIN = \"origin\";\n protected static final String SCALE = \"scale\";\n- protected static final String SCALE_WEIGHT = \"scale_weight\";\n+ protected static final String DECAY = \"decay\";\n+ protected static final String OFFSET = \"offset\";\n \n private String fieldName;\n- private Object reference;\n+ private Object origin;\n private Object scale;\n- private double scaleWeight = -1;\n+ private double decay = -1;\n+ private Object offset;\n \n- public DecayFunctionBuilder(String fieldName, Object reference, Object scale) {\n+ public DecayFunctionBuilder(String fieldName, Object origin, Object scale) {\n this.fieldName = fieldName;\n- this.reference = reference;\n+ this.origin = origin;\n this.scale = scale;\n }\n- public DecayFunctionBuilder setScaleWeight(double scaleWeight) {\n- if(scaleWeight <=0 || scaleWeight >= 1.0) {\n+\n+ public DecayFunctionBuilder setDecay(double decay) {\n+ if (decay <= 0 || decay >= 1.0) {\n throw new ElasticSearchIllegalStateException(\"scale weight parameter must be in range 0..1!\");\n }\n- this.scaleWeight = scaleWeight;\n+ this.decay = decay;\n+ return this;\n+ }\n+\n+ public DecayFunctionBuilder setOffset(Object offset) {\n+ this.offset = offset;\n return this;\n }\n- \n+\n @Override\n public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {\n builder.startObject(getName());\n builder.startObject(fieldName);\n- builder.field(REFERNECE, reference);\n+ builder.field(ORIGIN, origin);\n builder.field(SCALE, scale);\n- if (scaleWeight > 0) {\n- builder.field(SCALE_WEIGHT, scaleWeight);\n+ if (decay > 0) {\n+ builder.field(DECAY, decay);\n+ }\n+ if (offset != null) {\n+ builder.field(OFFSET, offset);\n }\n builder.endObject();\n builder.endObject();", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java", "status": "modified" }, { "diff": "@@ -26,6 +26,7 @@\n import org.elasticsearch.ElasticSearchParseException;\n import org.elasticsearch.common.geo.GeoDistance;\n import org.elasticsearch.common.geo.GeoPoint;\n+import org.elasticsearch.common.lucene.search.function.CombineFunction;\n import org.elasticsearch.common.lucene.search.function.ScoreFunction;\n import org.elasticsearch.common.unit.DistanceUnit;\n import org.elasticsearch.common.unit.TimeValue;\n@@ -57,25 +58,25 @@\n * {@code}\n * { \n * \"fieldname1\" : {\n- * 
\"reference\" = \"someValue\", \n+ * \"origin\" = \"someValue\", \n * \"scale\" = \"someValue\"\n * } \n * \n * }\n * </pre>\n * \n- * \"reference\" here refers to the reference point and \"scale\" to the level of\n- * uncertainty you have in your reference.\n+ * \"origin\" here refers to the reference point and \"scale\" to the level of\n+ * uncertainty you have in your origin.\n * <p>\n * \n * For example, you might want to retrieve an event that took place around the\n * 20 May 2010 somewhere near Berlin. You are mainly interested in events that\n * are close to the 20 May 2010 but you are unsure about your guess, maybe it\n- * was a week before or after that. Your \"reference\" for the date field would be\n+ * was a week before or after that. Your \"origin\" for the date field would be\n * \"20 May 2010\" and your \"scale\" would be \"7d\".\n * \n * This class parses the input and creates a scoring function from the\n- * parameters reference and scale.\n+ * parameters origin and scale.\n * <p>\n * To write a new scoring function, create a new class that inherits from this\n * one and implement the getDistanceFuntion(). Furthermore, to create a builder,\n@@ -102,7 +103,7 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser {\n * {@code}\n * { \n * \"fieldname1\" : {\n- * \"reference\" = \"someValue\", \n+ * \"origin\" = \"someValue\", \n * \"scale\" = \"someValue\"\n * } \n * \n@@ -120,7 +121,7 @@ public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser\n currentFieldName = parser.currentName();\n token = parser.nextToken();\n if (token == XContentParser.Token.START_OBJECT) {\n- // parse per field the reference and scale value\n+ // parse per field the origin and scale value\n scoreFunction = parseVariable(currentFieldName, parser, parseContext);\n } else {\n throw new ElasticSearchParseException(\"Malformed score function score parameters.\");\n@@ -132,7 +133,7 @@ public ScoreFunction parse(QueryParseContext parseContext, XContentParser parser\n return scoreFunction;\n }\n \n- // parses reference and scale parameter for field \"fieldName\"\n+ // parses origin and scale parameter for field \"fieldName\"\n private ScoreFunction parseVariable(String fieldName, XContentParser parser, QueryParseContext parseContext) throws IOException {\n \n // now, the field must exist, else we cannot read the value for\n@@ -161,8 +162,9 @@ private ScoreFunction parseNumberVariable(String fieldName, XContentParser parse\n XContentParser.Token token;\n String parameterName = null;\n double scale = 0;\n- double reference = 0;\n- double scaleWeight = 0.5;\n+ double origin = 0;\n+ double decay = 0.5;\n+ double offset = 0.0d;\n boolean scaleFound = false;\n boolean refFound = false;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n@@ -171,50 +173,55 @@ private ScoreFunction parseNumberVariable(String fieldName, XContentParser parse\n } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {\n scale = parser.doubleValue();\n scaleFound = true;\n- } else if (parameterName.equals(DecayFunctionBuilder.SCALE_WEIGHT)) {\n- scaleWeight = parser.doubleValue();\n- } else if (parameterName.equals(DecayFunctionBuilder.REFERNECE)) {\n- reference = parser.doubleValue();\n+ } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {\n+ decay = parser.doubleValue();\n+ } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {\n+ origin = parser.doubleValue();\n refFound = true;\n+ } else if 
(parameterName.equals(DecayFunctionBuilder.OFFSET)) {\n+ offset = parser.doubleValue();\n } else {\n throw new ElasticSearchParseException(\"Parameter \" + parameterName + \" not supported!\");\n }\n }\n if (!scaleFound || !refFound) {\n- throw new ElasticSearchParseException(\"Both \" + DecayFunctionBuilder.SCALE + \"and \" + DecayFunctionBuilder.REFERNECE\n+ throw new ElasticSearchParseException(\"Both \" + DecayFunctionBuilder.SCALE + \"and \" + DecayFunctionBuilder.ORIGIN\n + \" must be set for numeric fields.\");\n }\n IndexNumericFieldData<?> numericFieldData = parseContext.fieldData().getForField(mapper);\n- return new NumericFieldDataScoreFunction(reference, scale, scaleWeight, getDecayFunction(), numericFieldData);\n+ return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData);\n }\n \n private ScoreFunction parseGeoVariable(String fieldName, XContentParser parser, QueryParseContext parseContext,\n GeoStringFieldMapper mapper) throws IOException {\n XContentParser.Token token;\n String parameterName = null;\n- GeoPoint reference = new GeoPoint();\n+ GeoPoint origin = new GeoPoint();\n String scaleString = \"1km\";\n- double scaleWeight = 0.5;\n+ String offsetString = \"0km\";\n+ double decay = 0.5;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n parameterName = parser.currentName();\n } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {\n scaleString = parser.text();\n- } else if (parameterName.equals(DecayFunctionBuilder.REFERNECE)) {\n- reference = GeoPoint.parse(parser);\n- } else if (parameterName.equals(DecayFunctionBuilder.SCALE_WEIGHT)) {\n- scaleWeight = parser.doubleValue();\n+ } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {\n+ origin = GeoPoint.parse(parser);\n+ } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {\n+ decay = parser.doubleValue();\n+ } else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {\n+ offsetString = parser.text();\n } else {\n throw new ElasticSearchParseException(\"Parameter \" + parameterName + \" not supported!\");\n }\n }\n- if (reference == null) {\n- throw new ElasticSearchParseException(DecayFunctionBuilder.REFERNECE + \"must be set for geo fields.\");\n+ if (origin == null) {\n+ throw new ElasticSearchParseException(DecayFunctionBuilder.ORIGIN + \"must be set for geo fields.\");\n }\n double scale = DistanceUnit.parse(scaleString, DistanceUnit.METERS, DistanceUnit.METERS);\n-\n+ double offset = DistanceUnit.parse(offsetString, DistanceUnit.METERS, DistanceUnit.METERS);\n IndexGeoPointFieldData<?> indexFieldData = parseContext.fieldData().getForField(mapper);\n- return new GeoFieldDataScoreFunction(reference, scale, scaleWeight, getDecayFunction(), indexFieldData);\n+ return new GeoFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), indexFieldData);\n \n }\n \n@@ -223,47 +230,52 @@ private ScoreFunction parseDateVariable(String fieldName, XContentParser parser,\n XContentParser.Token token;\n String parameterName = null;\n String scaleString = null;\n- String referenceString = null;\n- double scaleWeight = 0.5;\n+ String originString = null;\n+ String offsetString = \"0d\";\n+ double decay = 0.5;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n parameterName = parser.currentName();\n } else if (parameterName.equals(DecayFunctionBuilder.SCALE)) {\n scaleString = 
parser.text();\n- } else if (parameterName.equals(DecayFunctionBuilder.REFERNECE)) {\n- referenceString = parser.text();\n- } else if (parameterName.equals(DecayFunctionBuilder.SCALE_WEIGHT)) {\n- scaleWeight = parser.doubleValue();\n+ } else if (parameterName.equals(DecayFunctionBuilder.ORIGIN)) {\n+ originString = parser.text();\n+ } else if (parameterName.equals(DecayFunctionBuilder.DECAY)) {\n+ decay = parser.doubleValue();\n+ } else if (parameterName.equals(DecayFunctionBuilder.OFFSET)) {\n+ offsetString = parser.text();\n } else {\n throw new ElasticSearchParseException(\"Parameter \" + parameterName + \" not supported!\");\n }\n }\n- long reference = SearchContext.current().nowInMillis();\n- if (referenceString != null) {\n- reference = dateFieldMapper.value(referenceString).longValue();\n+ long origin = SearchContext.current().nowInMillis();\n+ if (originString != null) {\n+ origin = dateFieldMapper.value(originString).longValue();\n }\n \n if (scaleString == null) {\n throw new ElasticSearchParseException(DecayFunctionBuilder.SCALE + \"must be set for geo fields.\");\n }\n TimeValue val = TimeValue.parseTimeValue(scaleString, TimeValue.timeValueHours(24));\n double scale = val.getMillis();\n+ val = TimeValue.parseTimeValue(offsetString, TimeValue.timeValueHours(24));\n+ double offset = val.getMillis();\n IndexNumericFieldData<?> numericFieldData = parseContext.fieldData().getForField(dateFieldMapper);\n- return new NumericFieldDataScoreFunction(reference, scale, scaleWeight, getDecayFunction(), numericFieldData);\n+ return new NumericFieldDataScoreFunction(origin, scale, decay, offset, getDecayFunction(), numericFieldData);\n }\n \n static class GeoFieldDataScoreFunction extends AbstractDistanceScoreFunction {\n \n- private final GeoPoint reference;\n+ private final GeoPoint origin;\n private final IndexGeoPointFieldData<?> fieldData;\n private GeoPointValues geoPointValues = null;\n \n private static final GeoDistance distFunction = GeoDistance.fromString(\"arc\");\n \n- public GeoFieldDataScoreFunction(GeoPoint reference, double scale, double scaleWeight, DecayFunction func,\n+ public GeoFieldDataScoreFunction(GeoPoint origin, double scale, double decay, double offset, DecayFunction func,\n IndexGeoPointFieldData<?> fieldData) {\n- super(scale, scaleWeight, func);\n- this.reference = reference;\n+ super(scale, decay, offset, func);\n+ this.origin = origin;\n this.fieldData = fieldData;\n }\n \n@@ -274,35 +286,40 @@ public void setNextReader(AtomicReaderContext context) {\n \n @Override\n protected double distance(int docId) {\n- GeoPoint other = geoPointValues.getValueMissing(docId, reference);\n- return distFunction.calculate(reference.lat(), reference.lon(), other.lat(), other.lon(), DistanceUnit.METERS);\n+ GeoPoint other = geoPointValues.getValueMissing(docId, origin);\n+ double distance = Math.abs(distFunction.calculate(origin.lat(), origin.lon(), other.lat(), other.lon(),\n+ DistanceUnit.METERS)) - offset;\n+ if (distance < 0.0d) {\n+ distance = 0.0d;\n+ }\n+ return distance;\n }\n \n @Override\n protected String getDistanceString(int docId) {\n- final GeoPoint other = geoPointValues.getValueMissing(docId, reference);\n- return \"arcDistance(\" + other + \"(=doc value), \" + reference + \") = \" + distance(docId);\n-\n+ final GeoPoint other = geoPointValues.getValueMissing(docId, origin);\n+ return \"arcDistance(\" + other + \"(=doc value), \" + origin + \"(=origin)) - \" + offset\n+ + \"(=offset) < 0.0 ? 
0.0: arcDistance(\" + other + \"(=doc value), \" + origin + \"(=origin)) - \" + offset\n+ + \"(=offset)\";\n }\n \n @Override\n protected String getFieldName() {\n return fieldData.getFieldNames().fullName();\n }\n-\n }\n \n static class NumericFieldDataScoreFunction extends AbstractDistanceScoreFunction {\n \n private final IndexNumericFieldData<?> fieldData;\n- private final double reference;\n+ private final double origin;\n private DoubleValues doubleValues;\n \n- public NumericFieldDataScoreFunction(double reference, double scale, double scaleWeight, DecayFunction func,\n+ public NumericFieldDataScoreFunction(double origin, double scale, double decay, double offset, DecayFunction func,\n IndexNumericFieldData<?> fieldData) {\n- super(scale, scaleWeight, func);\n+ super(scale, decay, offset, func);\n this.fieldData = fieldData;\n- this.reference = reference;\n+ this.origin = origin;\n }\n \n public void setNextReader(AtomicReaderContext context) {\n@@ -311,12 +328,18 @@ public void setNextReader(AtomicReaderContext context) {\n \n @Override\n protected double distance(int docId) {\n- return doubleValues.getValueMissing(docId, reference) - reference;\n+ double distance = Math.abs(doubleValues.getValueMissing(docId, origin) - origin) - offset;\n+ if (distance < 0.0) {\n+ distance = 0.0;\n+ }\n+ return distance;\n }\n \n @Override\n protected String getDistanceString(int docId) {\n- return \"(\" + doubleValues.getValueMissing(docId, reference) + \"(=doc value) - \" + reference + \")\";\n+ return \"Math.abs(\" + doubleValues.getValueMissing(docId, origin) + \"(=doc value) - \" + origin + \"(=origin)) - \"\n+ + offset + \"(=offset) < 0.0 ? 0.0: Math.abs(\" + doubleValues.getValueMissing(docId, origin) + \"(=doc value) - \"\n+ + origin + \") - \" + offset + \"(=offset)\";\n }\n \n @Override\n@@ -329,36 +352,37 @@ protected String getFieldName() {\n * This is the base class for scoring a single field.\n * \n * */\n- public static abstract class AbstractDistanceScoreFunction implements ScoreFunction {\n+ public static abstract class AbstractDistanceScoreFunction extends ScoreFunction {\n \n private final double scale;\n+ protected final double offset;\n private final DecayFunction func;\n \n- public AbstractDistanceScoreFunction(double userSuppiedScale, double userSuppliedScaleWeight, DecayFunction func) {\n+ public AbstractDistanceScoreFunction(double userSuppiedScale, double decay, double offset, DecayFunction func) {\n+ super(CombineFunction.MULT);\n if (userSuppiedScale <= 0.0) {\n throw new ElasticSearchIllegalArgumentException(FunctionScoreQueryParser.NAME + \" : scale must be > 0.0.\");\n }\n- if (userSuppliedScaleWeight <= 0.0 || userSuppliedScaleWeight >= 1.0) {\n+ if (decay <= 0.0 || decay >= 1.0) {\n throw new ElasticSearchIllegalArgumentException(FunctionScoreQueryParser.NAME\n- + \" : scale_weight must be in the range [0..1].\");\n+ + \" : decay must be in the range [0..1].\");\n }\n- this.scale = func.processScale(userSuppiedScale, userSuppliedScaleWeight);\n+ this.scale = func.processScale(userSuppiedScale, decay);\n this.func = func;\n+ if (offset < 0.0d) {\n+ throw new ElasticSearchIllegalArgumentException(FunctionScoreQueryParser.NAME + \" : offset must be > 0.0\");\n+ }\n+ this.offset = offset;\n }\n \n @Override\n public double score(int docId, float subQueryScore) {\n- return (subQueryScore * factor(docId));\n- }\n-\n- @Override\n- public double factor(int docId) {\n double value = distance(docId);\n return func.evaluate(value, scale);\n }\n \n /**\n- * This function 
computes the distance from a defined reference. Since\n+ * This function computes the distance from a defined origin. Since\n * the value of the document is read from the index, it cannot be\n * guaranteed that the value actually exists. If it does not, we assume\n * the user handles this case in the query and return 0.\n@@ -372,19 +396,9 @@ public double factor(int docId) {\n @Override\n public Explanation explainScore(int docId, Explanation subQueryExpl) {\n ComplexExplanation ce = new ComplexExplanation();\n- ce.setValue((float) score(docId, subQueryExpl.getValue()));\n- ce.setMatch(true);\n- ce.setDescription(\"subQueryScore * Function for field \" + getFieldName() + \":\");\n- ce.addDetail(func.explainFunction(getDistanceString(docId), distance(docId), scale));\n- return ce;\n- }\n-\n- @Override\n- public Explanation explainFactor(int docId) {\n- ComplexExplanation ce = new ComplexExplanation();\n- ce.setValue((float) factor(docId));\n+ ce.setValue(CombineFunction.toFloat(score(docId, subQueryExpl.getValue())));\n ce.setMatch(true);\n- ce.setDescription(\"subQueryScore * Function for field \" + getFieldName() + \":\");\n+ ce.setDescription(\"Function for field \" + getFieldName() + \":\");\n ce.addDetail(func.explainFunction(getDistanceString(docId), distance(docId), scale));\n return ce;\n }", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java", "status": "modified" }, { "diff": "@@ -44,6 +44,8 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost\n private Float maxBoost;\n \n private String scoreMode;\n+ \n+ private String boostMode;\n \n private ArrayList<FilterBuilder> filters = new ArrayList<FilterBuilder>();\n private ArrayList<ScoreFunctionBuilder> scoreFunctions = new ArrayList<ScoreFunctionBuilder>();\n@@ -74,6 +76,11 @@ public FunctionScoreQueryBuilder scoreMode(String scoreMode) {\n this.scoreMode = scoreMode;\n return this;\n }\n+ \n+ public FunctionScoreQueryBuilder boostMode(String boostMode) {\n+ this.boostMode = boostMode;\n+ return this;\n+ }\n \n public FunctionScoreQueryBuilder maxBoost(float maxBoost) {\n this.maxBoost = maxBoost;\n@@ -124,6 +131,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep\n if (scoreMode != null) {\n builder.field(\"score_mode\", scoreMode);\n }\n+ if (boostMode != null) {\n+ builder.field(\"boost_mode\", boostMode);\n+ }\n if (maxBoost != null) {\n builder.field(\"max_boost\", maxBoost);\n }", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java", "status": "modified" }, { "diff": "@@ -25,6 +25,7 @@\n import org.elasticsearch.common.inject.Inject;\n import org.elasticsearch.common.lucene.search.MatchAllDocsFilter;\n import org.elasticsearch.common.lucene.search.XConstantScoreQuery;\n+import org.elasticsearch.common.lucene.search.function.CombineFunction;\n import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;\n import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;\n import org.elasticsearch.common.lucene.search.function.ScoreFunction;\n@@ -67,6 +68,7 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n \n String currentFieldName = null;\n XContentParser.Token token;\n+ CombineFunction combineFunction = CombineFunction.MULT;\n while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {\n if (token == XContentParser.Token.FIELD_NAME) {\n currentFieldName = parser.currentName();\n@@ -76,6 
+78,8 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n query = new XConstantScoreQuery(parseContext.parseInnerFilter());\n } else if (\"score_mode\".equals(currentFieldName) || \"scoreMode\".equals(currentFieldName)) {\n scoreMode = parseScoreMode(parseContext, parser);\n+ } else if (\"boost_mode\".equals(currentFieldName) || \"boostMode\".equals(currentFieldName)) {\n+ combineFunction = parseBoostMode(parseContext, parser);\n } else if (\"max_boost\".equals(currentFieldName) || \"maxBoost\".equals(currentFieldName)) {\n maxBoost = parser.floatValue();\n } else if (\"boost\".equals(currentFieldName)) {\n@@ -101,13 +105,19 @@ public Query parse(QueryParseContext parseContext) throws IOException, QueryPars\n // provided. In this case we create a FunctionScoreQuery.\n if (filterFunctions.size() == 1 && filterFunctions.get(0).filter == null) {\n FunctionScoreQuery theQuery = new FunctionScoreQuery(query, filterFunctions.get(0).function);\n+ if (combineFunction != null) {\n+ theQuery.setCombineFunction(combineFunction);\n+ }\n theQuery.setBoost(boost);\n theQuery.setMaxBoost(maxBoost);\n return theQuery;\n // in all other cases we create a FiltersFunctionScoreQuery.\n } else {\n FiltersFunctionScoreQuery functionScoreQuery = new FiltersFunctionScoreQuery(query, scoreMode,\n filterFunctions.toArray(new FiltersFunctionScoreQuery.FilterFunction[filterFunctions.size()]), maxBoost);\n+ if (combineFunction != null) {\n+ functionScoreQuery.setCombineFunction(combineFunction);\n+ }\n functionScoreQuery.setBoost(boost);\n return functionScoreQuery;\n }\n@@ -156,8 +166,8 @@ private FiltersFunctionScoreQuery.ScoreMode parseScoreMode(QueryParseContext par\n return FiltersFunctionScoreQuery.ScoreMode.Max;\n } else if (\"min\".equals(scoreMode)) {\n return FiltersFunctionScoreQuery.ScoreMode.Min;\n- } else if (\"total\".equals(scoreMode)) {\n- return FiltersFunctionScoreQuery.ScoreMode.Total;\n+ } else if (\"sum\".equals(scoreMode)) {\n+ return FiltersFunctionScoreQuery.ScoreMode.Sum;\n } else if (\"multiply\".equals(scoreMode)) {\n return FiltersFunctionScoreQuery.ScoreMode.Multiply;\n } else if (\"first\".equals(scoreMode)) {\n@@ -166,4 +176,14 @@ private FiltersFunctionScoreQuery.ScoreMode parseScoreMode(QueryParseContext par\n throw new QueryParsingException(parseContext.index(), NAME + \" illegal score_mode [\" + scoreMode + \"]\");\n }\n }\n+\n+ private CombineFunction parseBoostMode(QueryParseContext parseContext, XContentParser parser) throws IOException {\n+ String boostMode = parser.text();\n+ for (CombineFunction cf : CombineFunction.values()) {\n+ if (cf.getName().equals(boostMode)) {\n+ return cf;\n+ }\n+ }\n+ throw new QueryParsingException(parseContext.index(), NAME + \" illegal boost_mode [\" + boostMode + \"]\");\n+ }\n }\n\\ No newline at end of file", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java", "status": "modified" }, { "diff": "@@ -0,0 +1,96 @@\n+/*\n+ * Licensed to ElasticSearch and Shay Banon under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. ElasticSearch licenses this\n+ * file to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. 
You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.elasticsearch.index.query.functionscore;\n+\n+import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionBuilder;\n+import org.elasticsearch.index.query.functionscore.factor.FactorBuilder;\n+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;\n+import org.elasticsearch.index.query.functionscore.lin.LinearDecayFunctionBuilder;\n+import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionBuilder;\n+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;\n+\n+import java.util.Map;\n+\n+public class ScoreFunctionBuilders {\n+ /**\n+ * A query that match on all documents.\n+ */\n+ public static ExponentialDecayFunctionBuilder exponentialDecayFunction(String fieldName, Object origin, Object scale) {\n+ return new ExponentialDecayFunctionBuilder(fieldName, origin, scale);\n+ }\n+\n+ public static ExponentialDecayFunctionBuilder exponentialDecayFunction(String fieldName, Object origin, Object scale, double decay) {\n+ return (ExponentialDecayFunctionBuilder) (new ExponentialDecayFunctionBuilder(fieldName, origin, scale)).setDecay(decay);\n+ }\n+\n+ public static ExponentialDecayFunctionBuilder exponentialDecayFunction(String fieldName, Object origin, Object scale, double decay,\n+ double offset) {\n+ return (ExponentialDecayFunctionBuilder) (new ExponentialDecayFunctionBuilder(fieldName, origin, scale)).setDecay(decay).setOffset(\n+ offset);\n+ }\n+\n+ public static GaussDecayFunctionBuilder gaussDecayFunction(String fieldName, Object origin, Object scale) {\n+ return new GaussDecayFunctionBuilder(fieldName, origin, scale);\n+ }\n+\n+ public static GaussDecayFunctionBuilder gaussDecayFunction(String fieldName, Object origin, Object scale, double decay) {\n+ return (GaussDecayFunctionBuilder) (new GaussDecayFunctionBuilder(fieldName, origin, scale)).setDecay(decay);\n+ }\n+\n+ public static GaussDecayFunctionBuilder gaussDecayFunction(String fieldName, Object origin, Object scale, double decay, double offset) {\n+ return (GaussDecayFunctionBuilder) (new GaussDecayFunctionBuilder(fieldName, origin, scale)).setDecay(decay).setOffset(offset);\n+ }\n+\n+ public static LinearDecayFunctionBuilder linearDecayFunction(String fieldName, Object origin, Object scale) {\n+ return new LinearDecayFunctionBuilder(fieldName, origin, scale);\n+ }\n+\n+ public static LinearDecayFunctionBuilder linearDecayFunction(String fieldName, Object origin, Object scale, double decay) {\n+ return (LinearDecayFunctionBuilder) (new LinearDecayFunctionBuilder(fieldName, origin, scale)).setDecay(decay);\n+ }\n+\n+ public static LinearDecayFunctionBuilder linearDecayFunction(String fieldName, Object origin, Object scale, double decay, double offset) {\n+ return (LinearDecayFunctionBuilder) (new LinearDecayFunctionBuilder(fieldName, origin, scale)).setDecay(decay).setOffset(offset);\n+ }\n+\n+ public static ScriptScoreFunctionBuilder scriptFunction(String script) {\n+ return (new ScriptScoreFunctionBuilder()).script(script);\n+ }\n+\n+ public static ScriptScoreFunctionBuilder 
scriptFunction(String script, String lang) {\n+ return (new ScriptScoreFunctionBuilder()).script(script).lang(lang);\n+ }\n+\n+ public static ScriptScoreFunctionBuilder scriptFunction(String script, String lang, Map<String, Object> params) {\n+ return (new ScriptScoreFunctionBuilder()).script(script).lang(lang).params(params);\n+ }\n+\n+ public static ScriptScoreFunctionBuilder scriptFunction(String script, Map<String, Object> params) {\n+ return (new ScriptScoreFunctionBuilder()).script(script).params(params);\n+ }\n+\n+ public static FactorBuilder factorFunction(float boost) {\n+ return (new FactorBuilder()).boostFactor(boost);\n+ }\n+\n+ public static RandomScoreFunctionBuilder randomFunction(long seed) {\n+ return (new RandomScoreFunctionBuilder()).seed(seed);\n+ }\n+}", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionBuilders.java", "status": "added" }, { "diff": "@@ -24,8 +24,8 @@\n \n public class ExponentialDecayFunctionBuilder extends DecayFunctionBuilder {\n \n- public ExponentialDecayFunctionBuilder(String fieldName, Object reference, Object scale) {\n- super(fieldName, reference, scale);\n+ public ExponentialDecayFunctionBuilder(String fieldName, Object origin, Object scale) {\n+ super(fieldName, origin, scale);\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionBuilder.java", "status": "modified" }, { "diff": "@@ -44,20 +44,20 @@ final static class ExponentialDecayScoreFunction implements DecayFunction {\n \n @Override\n public double evaluate(double value, double scale) {\n- return Math.exp(scale * Math.abs(value));\n+ return Math.exp(scale * value);\n }\n \n @Override\n public Explanation explainFunction(String valueExpl, double value, double scale) {\n ComplexExplanation ce = new ComplexExplanation();\n ce.setValue((float) evaluate(value, scale));\n- ce.setDescription(\"exp(- abs(\" + valueExpl + \") * \" + -1*scale + \")\");\n+ ce.setDescription(\"exp(- abs(\" + valueExpl + \") * \" + -1 * scale + \")\");\n return ce;\n }\n \n @Override\n- public double processScale(double scale, double value) {\n- return Math.log(value) / scale;\n+ public double processScale(double scale, double decay) {\n+ return Math.log(decay) / scale;\n }\n \n }", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java", "status": "modified" }, { "diff": "@@ -24,8 +24,8 @@\n \n public class GaussDecayFunctionBuilder extends DecayFunctionBuilder {\n \n- public GaussDecayFunctionBuilder(String fieldName, Object reference, Object scale) {\n- super(fieldName, reference, scale);\n+ public GaussDecayFunctionBuilder(String fieldName, Object origin, Object scale) {\n+ super(fieldName, origin, scale);\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionBuilder.java", "status": "modified" }, { "diff": "@@ -47,13 +47,13 @@ public double evaluate(double value, double scale) {\n public Explanation explainFunction(String valueExpl, double value, double scale) {\n ComplexExplanation ce = new ComplexExplanation();\n ce.setValue((float) evaluate(value, scale));\n- ce.setDescription(\"-exp(-0.5*pow(\" + valueExpl + \",2.0)/\" + -1*scale + \")\");\n+ ce.setDescription(\"-exp(-0.5*pow(\" + valueExpl + \",2.0)/\" + -1 * scale + \")\");\n return ce;\n }\n \n @Override\n- public double processScale(double scale, double value) {\n- return 0.5 * Math.pow(scale, 2.0) / Math.log(value);\n+ public double 
processScale(double scale, double decay) {\n+ return 0.5 * Math.pow(scale, 2.0) / Math.log(decay);\n }\n }\n ", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java", "status": "modified" }, { "diff": "@@ -23,8 +23,8 @@\n \n public class LinearDecayFunctionBuilder extends DecayFunctionBuilder {\n \n- public LinearDecayFunctionBuilder(String fieldName, Object reference, Object scale) {\n- super(fieldName, reference, scale);\n+ public LinearDecayFunctionBuilder(String fieldName, Object origin, Object scale) {\n+ super(fieldName, origin, scale);\n }\n \n @Override", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionBuilder.java", "status": "modified" }, { "diff": "@@ -43,8 +43,8 @@ public DecayFunction getDecayFunction() {\n final static class LinearDecayScoreFunction implements DecayFunction {\n \n @Override\n- public double evaluate(double value, double scale) {\n- return Math.max(0.0, (scale - Math.abs(value)) / scale);\n+ public double evaluate(double value, double scale) { \n+ return Math.max(0.0, (scale - value) / scale);\n }\n \n @Override\n@@ -56,8 +56,8 @@ public Explanation explainFunction(String valueExpl, double value, double scale)\n }\n \n @Override\n- public double processScale(double scale, double value) {\n- return scale / (1.0 - value);\n+ public double processScale(double scale, double decay) {\n+ return scale / (1.0 - decay);\n }\n \n }", "filename": "src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java", "status": "modified" }, { "diff": "@@ -39,7 +39,6 @@\n import org.elasticsearch.index.engine.VersionConflictEngineException;\n import org.elasticsearch.index.query.FilterBuilders;\n import org.elasticsearch.index.query.QueryBuilders;\n-import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;\n import org.elasticsearch.test.integration.AbstractSharedClusterTest;\n import org.junit.Test;\n \n@@ -50,6 +49,7 @@\n import static org.elasticsearch.common.xcontent.XContentFactory.*;\n import static org.elasticsearch.index.query.FilterBuilders.termFilter;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n import static org.hamcrest.Matchers.*;\n \n@@ -1159,7 +1159,7 @@ public void testPercolateScoreAndSorting() throws Exception {\n .setScore(true)\n .setSize(size)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"doc['level'].value\")))\n+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(\"doc['level'].value\")))\n .execute().actionGet();\n assertNoFailures(response);\n assertThat(response.getCount(), equalTo(numQueries));\n@@ -1178,7 +1178,7 @@ public void testPercolateScoreAndSorting() throws Exception {\n .setSort(true)\n .setSize(size)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"doc['level'].value\")))\n+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(\"doc['level'].value\")))\n .execute().actionGet();\n assertNoFailures(response);\n assertThat(response.getCount(), 
equalTo(numQueries));\n@@ -1203,8 +1203,7 @@ public void testPercolateScoreAndSorting() throws Exception {\n .setSort(true)\n .setSize(size)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(QueryBuilders.functionScoreQuery(matchQuery(\"field1\", value))\n- .add(new ScriptScoreFunctionBuilder().script(\"doc['level'].value\")))\n+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchQuery(\"field1\", value), scriptFunction(\"doc['level'].value\")))\n .execute().actionGet();\n assertNoFailures(response);\n \n@@ -1237,7 +1236,7 @@ public void testPercolateSortingWithNoSize() throws Exception {\n .setSort(true)\n .setSize(2)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"doc['level'].value\")))\n+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(\"doc['level'].value\")))\n .execute().actionGet();\n assertNoFailures(response);\n assertThat(response.getCount(), equalTo(2l));\n@@ -1249,7 +1248,7 @@ public void testPercolateSortingWithNoSize() throws Exception {\n response = client().preparePercolate().setIndices(\"my-index\").setDocumentType(\"my-type\")\n .setSort(true)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"doc['level'].value\")))\n+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(\"doc['level'].value\")))\n .execute().actionGet();\n assertThat(response.getCount(), equalTo(0l));\n assertThat(response.getSuccessfulShards(), equalTo(3));\n@@ -1269,7 +1268,7 @@ public void testPercolateOnEmptyIndex() throws Exception {\n .setSort(true)\n .setSize(2)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"doc['level'].value\")))\n+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(\"doc['level'].value\")))\n .execute().actionGet();\n assertNoFailures(response);\n assertThat(response.getCount(), equalTo(0l));\n@@ -1290,7 +1289,7 @@ public void testPercolateNotEmptyIndexButNoRefresh() throws Exception {\n .setSort(true)\n .setSize(2)\n .setPercolateDoc(docBuilder().setDoc(\"field\", \"value\"))\n- .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"doc['level'].value\")))\n+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(\"doc['level'].value\")))\n .execute().actionGet();\n assertNoFailures(response);\n assertThat(response.getCount(), equalTo(0l));", "filename": "src/test/java/org/elasticsearch/test/integration/percolator/SimplePercolatorTests.java", "status": "modified" }, { "diff": "", "filename": "src/test/java/org/elasticsearch/test/integration/search/child/SimpleChildQuerySearchTests.java", "status": "modified" }, { "diff": "@@ -19,16 +19,13 @@\n \n package org.elasticsearch.test.integration.search.customscore;\n \n-\n import org.apache.lucene.search.Explanation;\n import org.elasticsearch.ElasticSearchException;\n import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.search.SearchType;\n import org.elasticsearch.common.Priority;\n import 
org.elasticsearch.index.query.FilterBuilders;\n-import org.elasticsearch.index.query.functionscore.factor.FactorBuilder;\n-import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;\n import org.elasticsearch.test.integration.AbstractSharedClusterTest;\n import org.junit.Test;\n \n@@ -40,13 +37,14 @@\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.index.query.FilterBuilders.termFilter;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.factorFunction;\n+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;\n import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;\n import static org.hamcrest.Matchers.anyOf;\n import static org.hamcrest.Matchers.equalTo;\n \n-\n /**\n *\n */\n@@ -55,8 +53,10 @@ public class CustomScoreSearchTests extends AbstractSharedClusterTest {\n @Test\n public void testScoreExplainBug_2283() throws Exception {\n client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n- ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth(\"test\").setWaitForYellowStatus().execute().actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth(\"test\").setWaitForYellowStatus().execute()\n+ .actionGet();\n assertThat(healthResponse.isTimedOut(), equalTo(false));\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"field\", \"value1\", \"color\", \"red\").execute().actionGet();\n@@ -66,73 +66,72 @@ public void testScoreExplainBug_2283() throws Exception {\n \n client().admin().indices().prepareRefresh().execute().actionGet();\n \n- SearchResponse searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery())\n- .add(termFilter(\"field\", \"value4\"), \"2\")\n- .add(termFilter(\"field\", \"value2\"), \"3\")\n- .scoreMode(\"first\"))\n- .setExplain(true)\n- .execute().actionGet();\n+ SearchResponse searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).add(termFilter(\"field\", \"value4\"), \"2\")\n+ .add(termFilter(\"field\", \"value2\"), \"3\").scoreMode(\"first\")).setExplain(true).execute().actionGet();\n \n assertNoFailures(searchResponse);\n \n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));\n- logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)\n+ .explanation());\n Explanation explanation = searchResponse.getHits().getAt(0).explanation();\n assertNotNull(explanation);\n assertThat(explanation.isMatch(), equalTo(true));\n 
assertThat(explanation.getValue(), equalTo(3f));\n- assertThat(explanation.getDescription(), equalTo(\"function score, score mode [first]\"));\n+ assertThat(explanation.getDescription(), equalTo(\"function score, product of:\"));\n \n- assertThat(explanation.getDetails().length, equalTo(2));\n+ assertThat(explanation.getDetails().length, equalTo(3));\n assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));\n assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));\n assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));\n assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));\n assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));\n- assertThat(explanation.getDetails()[1].getDetails().length, equalTo(3));\n+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));\n \n // Same query but with boost\n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery())\n- .add(termFilter(\"field\", \"value4\"), \"2\")\n- .add(termFilter(\"field\", \"value2\"), \"3\")\n- .boost(2)\n- .scoreMode(\"first\"))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).add(termFilter(\"field\", \"value4\"), \"2\")\n+ .add(termFilter(\"field\", \"value2\"), \"3\").boost(2).scoreMode(\"first\")).setExplain(true).execute()\n+ .actionGet();\n \n assertNoFailures(searchResponse);\n \n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(6f));\n- logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)\n+ .explanation());\n explanation = searchResponse.getHits().getAt(0).explanation();\n assertNotNull(explanation);\n assertThat(explanation.isMatch(), equalTo(true));\n assertThat(explanation.getValue(), equalTo(6f));\n- assertThat(explanation.getDescription(), equalTo(\"function score, score mode [first]\"));\n+ assertThat(explanation.getDescription(), equalTo(\"function score, product of:\"));\n \n- assertThat(explanation.getDetails().length, equalTo(2));\n+ assertThat(explanation.getDetails().length, equalTo(3));\n assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));\n assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));\n assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));\n assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));\n- assertThat(explanation.getDetails()[1].getValue(), equalTo(6f));\n- assertThat(explanation.getDetails()[1].getDetails().length, equalTo(3));\n- assertThat(explanation.getDetails()[1].getDetails()[2].getDescription(), equalTo(\"queryBoost\"));\n- assertThat(explanation.getDetails()[1].getDetails()[2].getValue(), equalTo(2f));\n+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));\n+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));\n+ assertThat(explanation.getDetails()[2].getDescription(), equalTo(\"queryBoost\"));\n+ assertThat(explanation.getDetails()[2].getValue(), equalTo(2f));\n }\n \n-\n @Test\n public void testScoreExplainBug_2283_withFunctionScore() throws Exception {\n 
client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n- ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth(\"test\").setWaitForYellowStatus().execute().actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth(\"test\").setWaitForYellowStatus().execute()\n+ .actionGet();\n assertThat(healthResponse.isTimedOut(), equalTo(false));\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"field\", \"value1\", \"color\", \"red\").execute().actionGet();\n@@ -142,73 +141,77 @@ public void testScoreExplainBug_2283_withFunctionScore() throws Exception {\n \n client().admin().indices().prepareRefresh().execute().actionGet();\n \n- SearchResponse searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).scoreMode(\"first\").add(termFilter(\"field\", \"value4\"), new ScriptScoreFunctionBuilder().script(\"2\")).add(termFilter(\"field\", \"value2\"), new ScriptScoreFunctionBuilder().script(\"3\")))\n- .setExplain(true)\n- .execute().actionGet();\n+ SearchResponse searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).scoreMode(\"first\").add(termFilter(\"field\", \"value4\"), scriptFunction(\"2\"))\n+ .add(termFilter(\"field\", \"value2\"), scriptFunction(\"3\"))).setExplain(true).execute().actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n \n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));\n- logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)\n+ .explanation());\n Explanation explanation = searchResponse.getHits().getAt(0).explanation();\n assertNotNull(explanation);\n assertThat(explanation.isMatch(), equalTo(true));\n assertThat(explanation.getValue(), equalTo(3f));\n- assertThat(explanation.getDescription(), equalTo(\"function score, score mode [first]\"));\n-\n- assertThat(explanation.getDetails().length, equalTo(2));\n+ assertThat(explanation.getDescription(), equalTo(\"function score, product of:\"));\n+ assertThat(explanation.getDetails().length, equalTo(3));\n assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));\n assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));\n assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));\n assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));\n assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));\n- assertThat(explanation.getDetails()[1].getDetails().length, equalTo(3));\n+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));\n \n // Same query but with boost\n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).scoreMode(\"first\").add(termFilter(\"field\", \"value4\"), new 
ScriptScoreFunctionBuilder().script(\"2\")).add(termFilter(\"field\", \"value2\"), new ScriptScoreFunctionBuilder().script(\"3\")).boost(2))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).scoreMode(\"first\").add(termFilter(\"field\", \"value4\"), scriptFunction(\"2\"))\n+ .add(termFilter(\"field\", \"value2\"), scriptFunction(\"3\")).boost(2)).setExplain(true).execute().actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n \n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(6f));\n- logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0)\n+ .explanation());\n explanation = searchResponse.getHits().getAt(0).explanation();\n assertNotNull(explanation);\n assertThat(explanation.isMatch(), equalTo(true));\n assertThat(explanation.getValue(), equalTo(6f));\n- assertThat(explanation.getDescription(), equalTo(\"function score, score mode [first]\"));\n+ assertThat(explanation.getDescription(), equalTo(\"function score, product of:\"));\n \n- assertThat(explanation.getDetails().length, equalTo(2));\n+ assertThat(explanation.getDetails().length, equalTo(3));\n assertThat(explanation.getDetails()[0].isMatch(), equalTo(true));\n assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));\n assertThat(explanation.getDetails()[0].getDetails().length, equalTo(2));\n assertThat(explanation.getDetails()[1].isMatch(), equalTo(true));\n- assertThat(explanation.getDetails()[1].getValue(), equalTo(6f));\n- assertThat(explanation.getDetails()[1].getDetails().length, equalTo(3));\n- assertThat(explanation.getDetails()[1].getDetails()[2].getDescription(), equalTo(\"queryBoost\"));\n- assertThat(explanation.getDetails()[1].getDetails()[2].getValue(), equalTo(2f));\n+ assertThat(explanation.getDetails()[1].getValue(), equalTo(3f));\n+ assertThat(explanation.getDetails()[1].getDetails().length, equalTo(2));\n+ assertThat(explanation.getDetails()[2].getDescription(), equalTo(\"queryBoost\"));\n+ assertThat(explanation.getDetails()[2].getValue(), equalTo(2f));\n }\n \n @Test\n public void testMultiValueCustomScriptBoost() throws ElasticSearchException, IOException {\n client().admin().indices().prepareDelete().execute().actionGet();\n \n- client().admin().indices().prepareCreate(\"test\")\n+ client().admin()\n+ .indices()\n+ .prepareCreate(\"test\")\n .setSettings(settingsBuilder().put(\"index.number_of_shards\", 1).put(\"index.number_of_replicas\", 0))\n- .addMapping(\"type\", jsonBuilder().startObject().startObject(\"type\").startObject(\"properties\")\n- .startObject(\"snum\").field(\"type\", \"string\").endObject()\n- .startObject(\"dnum\").field(\"type\", \"double\").endObject()\n- .startObject(\"slnum\").field(\"type\", \"long\").endObject()\n- .startObject(\"gp\").field(\"type\", \"geo_point\").endObject()\n- .endObject().endObject().endObject())\n- .execute().actionGet();\n+ .addMapping(\n+ \"type\",\n+ jsonBuilder().startObject().startObject(\"type\").startObject(\"properties\").startObject(\"snum\")\n+ .field(\"type\", 
\"string\").endObject().startObject(\"dnum\").field(\"type\", \"double\").endObject()\n+ .startObject(\"slnum\").field(\"type\", \"long\").endObject().startObject(\"gp\").field(\"type\", \"geo_point\")\n+ .endObject().endObject().endObject().endObject()).execute().actionGet();\n client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n \n String[] values = new String[100];\n@@ -223,71 +226,75 @@ public void testMultiValueCustomScriptBoost() throws ElasticSearchException, IOE\n lValues[i] = (i + offset);\n dValues[i] = (i + offset);\n }\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"1\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value check\")\n- .field(\"snum\", values)\n- .field(\"dnum\", dValues)\n- .field(\"lnum\", lValues)\n- .field(\"gp\", gp)\n- .endObject())).actionGet();\n+ client().index(\n+ indexRequest(\"test\")\n+ .type(\"type1\")\n+ .id(\"1\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"snum\", values).field(\"dnum\", dValues)\n+ .field(\"lnum\", lValues).field(\"gp\", gp).endObject())).actionGet();\n offset++;\n for (int i = 0; i < values.length; i++) {\n values[i] = \"\" + (i + offset);\n gp[i] = \"\" + (i + offset) + \",\" + (i + offset);\n lValues[i] = (i + offset);\n dValues[i] = (i + offset);\n }\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"2\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value check\")\n- .field(\"snum\", values)\n- .field(\"dnum\", dValues)\n- .field(\"lnum\", lValues)\n- .field(\"gp\", gp)\n- .endObject())).actionGet();\n+ client().index(\n+ indexRequest(\"test\")\n+ .type(\"type1\")\n+ .id(\"2\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"snum\", values).field(\"dnum\", dValues)\n+ .field(\"lnum\", lValues).field(\"gp\", gp).endObject())).actionGet();\n client().admin().indices().refresh(refreshRequest()).actionGet();\n \n logger.info(\"running min(doc['num1'].value)\");\n- SearchResponse response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\"))\n- .script(\"c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min\")))\n- ).actionGet();\n+ SearchResponse response = client()\n+ .search(searchRequest()\n+ .searchType(SearchType.QUERY_THEN_FETCH)\n+ .source(searchSource()\n+ .explain(true)\n+ .query(customScoreQuery(termQuery(\"test\", \"value\"))\n+ .script(\"c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min\"))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\"))\n- .script(\"c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min\")))\n- ).actionGet();\n+ response = client().search(\n+ 
searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ customScoreQuery(termQuery(\"test\", \"value\")).script(\n+ \"c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min\"))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\"))\n- .script(\"c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min\")))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ customScoreQuery(termQuery(\"test\", \"value\")).script(\n+ \"c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min\"))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\"))\n- .script(\"c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min\")))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ customScoreQuery(termQuery(\"test\", \"value\")).script(\n+ \"c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min\"))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -296,20 +303,20 @@ public void testMultiValueCustomScriptBoost() throws ElasticSearchException, IOE\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n }\n \n-\n @Test\n public void testMultiValueCustomScriptBoost_withFunctionScore() throws ElasticSearchException, IOException {\n client().admin().indices().prepareDelete().execute().actionGet();\n \n- client().admin().indices().prepareCreate(\"test\")\n+ client().admin()\n+ .indices()\n+ .prepareCreate(\"test\")\n .setSettings(settingsBuilder().put(\"index.number_of_shards\", 1).put(\"index.number_of_replicas\", 0))\n- .addMapping(\"type\", jsonBuilder().startObject().startObject(\"type\").startObject(\"properties\")\n- .startObject(\"snum\").field(\"type\", \"string\").endObject()\n- .startObject(\"dnum\").field(\"type\", \"double\").endObject()\n- .startObject(\"slnum\").field(\"type\", \"long\").endObject()\n- .startObject(\"gp\").field(\"type\", \"geo_point\").endObject()\n- 
.endObject().endObject().endObject())\n- .execute().actionGet();\n+ .addMapping(\n+ \"type\",\n+ jsonBuilder().startObject().startObject(\"type\").startObject(\"properties\").startObject(\"snum\")\n+ .field(\"type\", \"string\").endObject().startObject(\"dnum\").field(\"type\", \"double\").endObject()\n+ .startObject(\"slnum\").field(\"type\", \"long\").endObject().startObject(\"gp\").field(\"type\", \"geo_point\")\n+ .endObject().endObject().endObject().endObject()).execute().actionGet();\n client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();\n \n String[] values = new String[100];\n@@ -324,71 +331,85 @@ public void testMultiValueCustomScriptBoost_withFunctionScore() throws ElasticSe\n lValues[i] = (i + offset);\n dValues[i] = (i + offset);\n }\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"1\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value check\")\n- .field(\"snum\", values)\n- .field(\"dnum\", dValues)\n- .field(\"lnum\", lValues)\n- .field(\"gp\", gp)\n- .endObject())).actionGet();\n+ client().index(\n+ indexRequest(\"test\")\n+ .type(\"type1\")\n+ .id(\"1\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"snum\", values).field(\"dnum\", dValues)\n+ .field(\"lnum\", lValues).field(\"gp\", gp).endObject())).actionGet();\n offset++;\n for (int i = 0; i < values.length; i++) {\n values[i] = \"\" + (i + offset);\n gp[i] = \"\" + (i + offset) + \",\" + (i + offset);\n lValues[i] = (i + offset);\n dValues[i] = (i + offset);\n }\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"2\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value check\")\n- .field(\"snum\", values)\n- .field(\"dnum\", dValues)\n- .field(\"lnum\", lValues)\n- .field(\"gp\", gp)\n- .endObject())).actionGet();\n+ client().index(\n+ indexRequest(\"test\")\n+ .type(\"type1\")\n+ .id(\"2\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"snum\", values).field(\"dnum\", dValues)\n+ .field(\"lnum\", lValues).field(\"gp\", gp).endObject())).actionGet();\n client().admin().indices().refresh(refreshRequest()).actionGet();\n \n logger.info(\"running min(doc['num1'].value)\");\n- SearchResponse response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder()\n- .script(\"c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min\"))))\n- ).actionGet();\n+ SearchResponse response = client()\n+ .search(searchRequest()\n+ .searchType(SearchType.QUERY_THEN_FETCH)\n+ .source(searchSource()\n+ .explain(true)\n+ .query(functionScoreQuery(\n+ termQuery(\"test\", \"value\"),\n+ scriptFunction(\"c_min = 1000; foreach (x : doc['snum'].values) { c_min = min(Integer.parseInt(x), c_min) } return c_min\")))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- 
.source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder()\n- .script(\"c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min\"))))\n- ).actionGet();\n+ response = client()\n+ .search(searchRequest()\n+ .searchType(SearchType.QUERY_THEN_FETCH)\n+ .source(searchSource()\n+ .explain(true)\n+ .query(functionScoreQuery(\n+ termQuery(\"test\", \"value\"),\n+ scriptFunction(\"c_min = 1000; foreach (x : doc['lnum'].values) { c_min = min(x, c_min) } return c_min\")))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder()\n- .script(\"c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min\"))))\n- ).actionGet();\n+ response = client()\n+ .search(searchRequest()\n+ .searchType(SearchType.QUERY_THEN_FETCH)\n+ .source(searchSource()\n+ .explain(true)\n+ .query(functionScoreQuery(\n+ termQuery(\"test\", \"value\"),\n+ scriptFunction(\"c_min = 1000; foreach (x : doc['dnum'].values) { c_min = min(x, c_min) } return c_min\")))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"2\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder()\n- .script(\"c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min\"))))\n- ).actionGet();\n+ response = client()\n+ .search(searchRequest()\n+ .searchType(SearchType.QUERY_THEN_FETCH)\n+ .source(searchSource()\n+ .explain(true)\n+ .query(functionScoreQuery(\n+ termQuery(\"test\", \"value\"),\n+ scriptFunction(\"c_min = 1000; foreach (x : doc['gp'].values) { c_min = min(x.lat, c_min) } return c_min\")))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -400,21 +421,24 @@ public void testMultiValueCustomScriptBoost_withFunctionScore() throws ElasticSe\n @Test\n public void testCustomScriptBoost() throws Exception {\n client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n-\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"1\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value beck\").field(\"num1\", 
1.0f).endObject())).actionGet();\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"2\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"num1\", 2.0f).endObject())).actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n+\n+ client().index(\n+ indexRequest(\"test\").type(\"type1\").id(\"1\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value beck\").field(\"num1\", 1.0f).endObject())).actionGet();\n+ client().index(\n+ indexRequest(\"test\").type(\"type1\").id(\"2\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"num1\", 2.0f).endObject())).actionGet();\n client().admin().indices().refresh(refreshRequest()).actionGet();\n \n logger.info(\"--- QUERY_THEN_FETCH\");\n \n logger.info(\"running doc['num1'].value\");\n- SearchResponse response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"doc['num1'].value\")))\n- ).actionGet();\n+ SearchResponse response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"doc['num1'].value\"))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -423,23 +447,22 @@ public void testCustomScriptBoost() throws Exception {\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n logger.info(\"running -doc['num1'].value\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"-doc['num1'].value\")))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"-doc['num1'].value\"))))\n+ .actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"1\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"2\"));\n \n-\n logger.info(\"running pow(doc['num1'].value, 2)\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"pow(doc['num1'].value, 2)\")))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true)\n+ .query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"pow(doc['num1'].value, 2)\")))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -448,10 +471,10 @@ public void testCustomScriptBoost() throws Exception {\n assertThat(response.getHits().getAt(1).id(), 
equalTo(\"1\"));\n \n logger.info(\"running max(doc['num1'].value, 1)\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"max(doc['num1'].value, 1d)\")))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ customScoreQuery(termQuery(\"test\", \"value\")).script(\"max(doc['num1'].value, 1d)\")))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -460,10 +483,10 @@ public void testCustomScriptBoost() throws Exception {\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n logger.info(\"running doc['num1'].value * _score\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"doc['num1'].value * _score\")))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ customScoreQuery(termQuery(\"test\", \"value\")).script(\"doc['num1'].value * _score\")))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -472,50 +495,59 @@ public void testCustomScriptBoost() throws Exception {\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n logger.info(\"running param1 * param2 * _score\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termQuery(\"test\", \"value\")).script(\"param1 * param2 * _score\").param(\"param1\", 2).param(\"param2\", 2)))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ customScoreQuery(termQuery(\"test\", \"value\")).script(\"param1 * param2 * _score\").param(\"param1\", 2)\n+ .param(\"param2\", 2)))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertSearchHits(response, \"1\", \"2\");\n \n-\n logger.info(\"running param1 * param2 * _score with filter instead of query\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(customScoreQuery(termFilter(\"test\", \"value\")).script(\"param1 * param2 * _score\").param(\"param1\", 2).param(\"param2\", 2)))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ customScoreQuery(termFilter(\"test\", \"value\")).script(\"param1 * param2 * _score\").param(\"param1\", 2)\n+ .param(\"param2\", 2)))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), 
response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertSearchHits(response, \"1\", \"2\");\n- assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score is always 1\n- assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score is always 1\n+ assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score\n+ // is\n+ // always\n+ // 1\n+ assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score\n+ // is\n+ // always\n+ // 1\n }\n \n-\n @Test\n public void testCustomScriptBoost_withFunctionScore() throws Exception {\n client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n-\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"1\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value beck\").field(\"num1\", 1.0f).endObject())).actionGet();\n- client().index(indexRequest(\"test\").type(\"type1\").id(\"2\")\n- .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"num1\", 2.0f).endObject())).actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n+\n+ client().index(\n+ indexRequest(\"test\").type(\"type1\").id(\"1\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value beck\").field(\"num1\", 1.0f).endObject())).actionGet();\n+ client().index(\n+ indexRequest(\"test\").type(\"type1\").id(\"2\")\n+ .source(jsonBuilder().startObject().field(\"test\", \"value check\").field(\"num1\", 2.0f).endObject())).actionGet();\n client().admin().indices().refresh(refreshRequest()).actionGet();\n \n logger.info(\"--- QUERY_THEN_FETCH\");\n \n logger.info(\"running doc['num1'].value\");\n- SearchResponse response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder().script(\"doc['num1'].value\"))))\n- ).actionGet();\n+ SearchResponse response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), scriptFunction(\"doc['num1'].value\"))))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -524,23 +556,22 @@ public void testCustomScriptBoost_withFunctionScore() throws Exception {\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n logger.info(\"running -doc['num1'].value\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder().script(\"-doc['num1'].value\"))))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), scriptFunction(\"-doc['num1'].value\"))))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", 
response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertThat(response.getHits().getAt(0).id(), equalTo(\"1\"));\n assertThat(response.getHits().getAt(1).id(), equalTo(\"2\"));\n \n-\n logger.info(\"running pow(doc['num1'].value, 2)\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder().script(\"pow(doc['num1'].value, 2)\"))))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), scriptFunction(\"pow(doc['num1'].value, 2)\"))))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -549,10 +580,10 @@ public void testCustomScriptBoost_withFunctionScore() throws Exception {\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n logger.info(\"running max(doc['num1'].value, 1)\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder().script(\"max(doc['num1'].value, 1d)\"))))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), scriptFunction(\"max(doc['num1'].value, 1d)\"))))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -561,10 +592,10 @@ public void testCustomScriptBoost_withFunctionScore() throws Exception {\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n logger.info(\"running doc['num1'].value * _score\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder().script(\"doc['num1'].value * _score\"))))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), scriptFunction(\"doc['num1'].value * _score\"))))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n@@ -573,64 +604,73 @@ public void testCustomScriptBoost_withFunctionScore() throws Exception {\n assertThat(response.getHits().getAt(1).id(), equalTo(\"1\"));\n \n logger.info(\"running param1 * param2 * _score\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder().script(\"param1 * param2 * _score\").param(\"param1\", 2).param(\"param2\", 2))))\n- ).actionGet();\n+ response = 
client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), scriptFunction(\"param1 * param2 * _score\")\n+ .param(\"param1\", 2).param(\"param2\", 2))))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertSearchHits(response, \"1\", \"2\");\n \n-\n logger.info(\"running param1 * param2 * _score with filter instead of query\");\n- response = client().search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(functionScoreQuery(termFilter(\"test\", \"value\")).add(new ScriptScoreFunctionBuilder().script(\"param1 * param2 * _score\").param(\"param1\", 2).param(\"param2\", 2))))\n- ).actionGet();\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termFilter(\"test\", \"value\"),\n+ scriptFunction(\"param1 * param2 * _score\").param(\"param1\", 2).param(\"param2\", 2))))).actionGet();\n \n assertThat(response.getHits().totalHits(), equalTo(2l));\n logger.info(\"Hit[0] {} Explanation {}\", response.getHits().getAt(0).id(), response.getHits().getAt(0).explanation());\n logger.info(\"Hit[1] {} Explanation {}\", response.getHits().getAt(1).id(), response.getHits().getAt(1).explanation());\n assertSearchHits(response, \"1\", \"2\");\n- assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score is always 1\n- assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score is always 1\n+ assertThat(response.getHits().getAt(0).score(), equalTo(4f)); // _score\n+ // is\n+ // always\n+ // 1\n+ assertThat(response.getHits().getAt(1).score(), equalTo(4f)); // _score\n+ // is\n+ // always\n+ // 1\n }\n \n @Test\n public void testTriggerBooleanScorer() throws Exception {\n client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"field\", \"value1\", \"color\", \"red\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"2\").setSource(\"field\", \"value2\", \"color\", \"blue\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"3\").setSource(\"field\", \"value3\", \"color\", \"red\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"4\").setSource(\"field\", \"value4\", \"color\", \"blue\").execute().actionGet();\n client().admin().indices().prepareRefresh().execute().actionGet();\n SearchResponse searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(fuzzyQuery(\"field\", \"value\"))\n- .add(FilterBuilders.idsFilter(\"type\").addIds(\"1\"), 3))\n+ .setQuery(customFiltersScoreQuery(fuzzyQuery(\"field\", \"value\")).add(FilterBuilders.idsFilter(\"type\").addIds(\"1\"), 3))\n .execute().actionGet();\n assertNoFailures(searchResponse);\n \n assertThat(searchResponse.getHits().totalHits(), 
equalTo(4l));\n }\n \n-\n @Test\n public void testTriggerBooleanScorer_withFunctionScore() throws Exception {\n client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"field\", \"value1\", \"color\", \"red\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"2\").setSource(\"field\", \"value2\", \"color\", \"blue\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"3\").setSource(\"field\", \"value3\", \"color\", \"red\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"4\").setSource(\"field\", \"value4\", \"color\", \"blue\").execute().actionGet();\n client().admin().indices().prepareRefresh().execute().actionGet();\n- SearchResponse searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(fuzzyQuery(\"field\", \"value\")).add(FilterBuilders.idsFilter(\"type\").addIds(\"1\"), new FactorBuilder().boostFactor(3)))\n- .execute().actionGet();\n+ SearchResponse searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(fuzzyQuery(\"field\", \"value\")).add(FilterBuilders.idsFilter(\"type\").addIds(\"1\"),\n+ factorFunction(3))).execute().actionGet();\n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n \n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -639,7 +679,8 @@ public void testTriggerBooleanScorer_withFunctionScore() throws Exception {\n @Test\n public void testCustomFiltersScore() throws Exception {\n client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"field\", \"value1\", \"color\", \"red\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"2\").setSource(\"field\", \"value2\", \"color\", \"blue\").execute().actionGet();\n@@ -648,12 +689,11 @@ public void testCustomFiltersScore() throws Exception {\n \n client().admin().indices().prepareRefresh().execute().actionGet();\n \n- SearchResponse searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery())\n- .add(termFilter(\"field\", \"value4\"), \"2\")\n- .add(termFilter(\"field\", \"value2\"), \"3\"))\n- .setExplain(true)\n- .execute().actionGet();\n+ SearchResponse searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).add(termFilter(\"field\", \"value4\"), \"2\").add(termFilter(\"field\", \"value2\"),\n+ \"3\")).setExplain(true).execute().actionGet();\n \n assertNoFailures(searchResponse);\n \n@@ -668,12 +708,11 @@ public void testCustomFiltersScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo(\"1\"), equalTo(\"3\")));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- 
.setQuery(customFiltersScoreQuery(matchAllQuery())\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value2\"), 3))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).add(termFilter(\"field\", \"value4\"), 2)\n+ .add(termFilter(\"field\", \"value2\"), 3)).setExplain(true).execute().actionGet();\n \n assertNoFailures(searchResponse);\n \n@@ -688,41 +727,50 @@ public void testCustomFiltersScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo(\"1\"), equalTo(\"3\")));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery()).scoreMode(\"total\")\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value1\"), 3)\n- .add(termFilter(\"color\", \"red\"), 5))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).scoreMode(\"total\").add(termFilter(\"field\", \"value4\"), 2)\n+ .add(termFilter(\"field\", \"value1\"), 3).add(termFilter(\"color\", \"red\"), 5)).setExplain(true).execute()\n+ .actionGet();\n \n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"1\"));\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(8.0f));\n logger.info(\"--> Hit[0] {} Explanation {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery()).scoreMode(\"max\")\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value1\"), 3)\n- .add(termFilter(\"color\", \"red\"), 5))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).scoreMode(\"max\").add(termFilter(\"field\", \"value4\"), 2)\n+ .add(termFilter(\"field\", \"value1\"), 3).add(termFilter(\"color\", \"red\"), 5)).setExplain(true).execute()\n+ .actionGet();\n \n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n- assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo(\"1\"), equalTo(\"3\"))); // could be both depending on the order of the docs internally (lucene order)\n+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo(\"1\"), equalTo(\"3\"))); // could\n+ // be\n+ // both\n+ // depending\n+ // on\n+ // the\n+ // order\n+ // of\n+ // the\n+ // docs\n+ // internally\n+ // (lucene\n+ // order)\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));\n logger.info(\"--> Hit[0] {} Explanation {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery()).scoreMode(\"avg\")\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value1\"), 3)\n- .add(termFilter(\"color\", \"red\"), 5))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).scoreMode(\"avg\").add(termFilter(\"field\", 
\"value4\"), 2)\n+ .add(termFilter(\"field\", \"value1\"), 3).add(termFilter(\"color\", \"red\"), 5)).setExplain(true).execute()\n+ .actionGet();\n \n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -733,13 +781,12 @@ public void testCustomFiltersScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(1).score(), equalTo(4.0f));\n logger.info(\"--> Hit[1] {} Explanation {}\", searchResponse.getHits().getAt(1).id(), searchResponse.getHits().getAt(1).explanation());\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery()).scoreMode(\"min\")\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value1\"), 3)\n- .add(termFilter(\"color\", \"red\"), 5))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).scoreMode(\"min\").add(termFilter(\"field\", \"value4\"), 2)\n+ .add(termFilter(\"field\", \"value1\"), 3).add(termFilter(\"color\", \"red\"), 5)).setExplain(true).execute()\n+ .actionGet();\n \n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -753,13 +800,12 @@ public void testCustomFiltersScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(matchAllQuery()).scoreMode(\"multiply\")\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value1\"), 3)\n- .add(termFilter(\"color\", \"red\"), 5))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(matchAllQuery()).scoreMode(\"multiply\").add(termFilter(\"field\", \"value4\"), 2)\n+ .add(termFilter(\"field\", \"value1\"), 3).add(termFilter(\"color\", \"red\"), 5)).setExplain(true).execute()\n+ .actionGet();\n \n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -773,13 +819,12 @@ public void testCustomFiltersScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"first\")\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value3\"), 3)\n- .add(termFilter(\"field\", \"value2\"), 4))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"first\")\n+ .add(termFilter(\"field\", \"value4\"), 2).add(termFilter(\"field\", \"value3\"), 3)\n+ .add(termFilter(\"field\", \"value2\"), 4)).setExplain(true).execute().actionGet();\n \n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -793,14 +838,12 @@ public void testCustomFiltersScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), equalTo(\"1\"));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));\n 
\n-\n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(customFiltersScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"multiply\")\n- .add(termFilter(\"field\", \"value4\"), 2)\n- .add(termFilter(\"field\", \"value1\"), 3)\n- .add(termFilter(\"color\", \"red\"), 5))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ customFiltersScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"multiply\")\n+ .add(termFilter(\"field\", \"value4\"), 2).add(termFilter(\"field\", \"value1\"), 3)\n+ .add(termFilter(\"color\", \"red\"), 5)).setExplain(true).execute().actionGet();\n \n assertNoFailures(searchResponse);\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -815,11 +858,11 @@ public void testCustomFiltersScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));\n }\n \n-\n @Test\n public void testCustomFiltersScore_withFunctionScore() throws Exception {\n client().admin().indices().prepareDelete().execute().actionGet();\n- client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute().actionGet();\n+ client().admin().indices().prepareCreate(\"test\").setSettings(settingsBuilder().put(\"index.number_of_shards\", 1)).execute()\n+ .actionGet();\n \n client().prepareIndex(\"test\", \"type\", \"1\").setSource(\"field\", \"value1\", \"color\", \"red\").execute().actionGet();\n client().prepareIndex(\"test\", \"type\", \"2\").setSource(\"field\", \"value2\", \"color\", \"blue\").execute().actionGet();\n@@ -828,9 +871,12 @@ public void testCustomFiltersScore_withFunctionScore() throws Exception {\n \n client().admin().indices().prepareRefresh().execute().actionGet();\n \n- SearchResponse searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).add(termFilter(\"field\", \"value4\"), new ScriptScoreFunctionBuilder().script(\"2\")).add(termFilter(\"field\", \"value2\"), new ScriptScoreFunctionBuilder().script(\"3\")))\n- .setExplain(true)\n+ SearchResponse searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery())\n+ .add(termFilter(\"field\", \"value4\"), scriptFunction(\"2\")).add(\n+ termFilter(\"field\", \"value2\"), scriptFunction(\"3\"))).setExplain(true)\n .execute().actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n@@ -846,10 +892,11 @@ public void testCustomFiltersScore_withFunctionScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo(\"1\"), equalTo(\"3\")));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value2\"), new FactorBuilder().boostFactor(3)))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).add(termFilter(\"field\", \"value4\"), factorFunction(2)).add(\n+ termFilter(\"field\", \"value2\"), factorFunction(3))).setExplain(true).execute().actionGet();\n \n 
assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n \n@@ -864,32 +911,56 @@ public void testCustomFiltersScore_withFunctionScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), anyOf(equalTo(\"1\"), equalTo(\"3\")));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).scoreMode(\"total\").add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value1\"), new FactorBuilder().boostFactor(3)).add(termFilter(\"color\", \"red\"), new FactorBuilder().boostFactor(5)))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).scoreMode(\"sum\")\n+ .add(termFilter(\"field\", \"value4\"), factorFunction(2))\n+ .add(termFilter(\"field\", \"value1\"), factorFunction(3))\n+ .add(termFilter(\"color\", \"red\"), factorFunction(5))).setExplain(true).execute()\n+ .actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n assertThat(searchResponse.getHits().getAt(0).id(), equalTo(\"1\"));\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(8.0f));\n logger.info(\"--> Hit[0] {} Explanation {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).scoreMode(\"max\").add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value1\"), new FactorBuilder().boostFactor(3)).add(termFilter(\"color\", \"red\"), new FactorBuilder().boostFactor(5)))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).scoreMode(\"max\")\n+ .add(termFilter(\"field\", \"value4\"), factorFunction(2))\n+ .add(termFilter(\"field\", \"value1\"), factorFunction(3))\n+ .add(termFilter(\"color\", \"red\"), factorFunction(5))).setExplain(true).execute()\n+ .actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n- assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo(\"1\"), equalTo(\"3\"))); // could be both depending on the order of the docs internally (lucene order)\n+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo(\"1\"), equalTo(\"3\"))); // could\n+ // be\n+ // both\n+ // depending\n+ // on\n+ // the\n+ // order\n+ // of\n+ // the\n+ // docs\n+ // internally\n+ // (lucene\n+ // order)\n assertThat(searchResponse.getHits().getAt(0).score(), equalTo(5.0f));\n logger.info(\"--> Hit[0] {} Explanation {}\", searchResponse.getHits().getAt(0).id(), searchResponse.getHits().getAt(0).explanation());\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).scoreMode(\"avg\").add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value1\"), new FactorBuilder().boostFactor(3)).add(termFilter(\"color\", \"red\"), new FactorBuilder().boostFactor(5)))\n- .setExplain(true)\n- .execute().actionGet();\n+ 
searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).scoreMode(\"avg\")\n+ .add(termFilter(\"field\", \"value4\"), factorFunction(2))\n+ .add(termFilter(\"field\", \"value1\"), factorFunction(3))\n+ .add(termFilter(\"color\", \"red\"), factorFunction(5))).setExplain(true).execute()\n+ .actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -900,10 +971,14 @@ public void testCustomFiltersScore_withFunctionScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(1).score(), equalTo(4.0f));\n logger.info(\"--> Hit[1] {} Explanation {}\", searchResponse.getHits().getAt(1).id(), searchResponse.getHits().getAt(1).explanation());\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).scoreMode(\"min\").add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value1\"), new FactorBuilder().boostFactor(3)).add(termFilter(\"color\", \"red\"), new FactorBuilder().boostFactor(5)))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).scoreMode(\"min\")\n+ .add(termFilter(\"field\", \"value4\"), factorFunction(2))\n+ .add(termFilter(\"field\", \"value1\"), factorFunction(3))\n+ .add(termFilter(\"color\", \"red\"), factorFunction(5))).setExplain(true).execute()\n+ .actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -917,10 +992,14 @@ public void testCustomFiltersScore_withFunctionScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(matchAllQuery()).scoreMode(\"multiply\").add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value1\"), new FactorBuilder().boostFactor(3)).add(termFilter(\"color\", \"red\"), new FactorBuilder().boostFactor(5)))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(matchAllQuery()).scoreMode(\"multiply\")\n+ .add(termFilter(\"field\", \"value4\"), factorFunction(2))\n+ .add(termFilter(\"field\", \"value1\"), factorFunction(3))\n+ .add(termFilter(\"color\", \"red\"), factorFunction(5))).setExplain(true).execute()\n+ .actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -934,10 +1013,14 @@ public void testCustomFiltersScore_withFunctionScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(1.0f));\n \n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"first\").add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value3\"), new FactorBuilder().boostFactor(3)).add(termFilter(\"field\", 
\"value2\"), new FactorBuilder().boostFactor(4)))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"first\")\n+ .add(termFilter(\"field\", \"value4\"), factorFunction(2))\n+ .add(termFilter(\"field\", \"value3\"), factorFunction(3))\n+ .add(termFilter(\"field\", \"value2\"), factorFunction(4))).setExplain(true).execute()\n+ .actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));\n@@ -951,11 +1034,14 @@ public void testCustomFiltersScore_withFunctionScore() throws Exception {\n assertThat(searchResponse.getHits().getAt(3).id(), equalTo(\"1\"));\n assertThat(searchResponse.getHits().getAt(3).score(), equalTo(searchResponse.getHits().getAt(3).explanation().getValue()));\n \n-\n- searchResponse = client().prepareSearch(\"test\")\n- .setQuery(functionScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"multiply\").add(termFilter(\"field\", \"value4\"), new FactorBuilder().boostFactor(2)).add(termFilter(\"field\", \"value1\"), new FactorBuilder().boostFactor(3)).add(termFilter(\"color\", \"red\"), new FactorBuilder().boostFactor(5)))\n- .setExplain(true)\n- .execute().actionGet();\n+ searchResponse = client()\n+ .prepareSearch(\"test\")\n+ .setQuery(\n+ functionScoreQuery(termsQuery(\"field\", \"value1\", \"value2\", \"value3\", \"value4\")).scoreMode(\"multiply\")\n+ .add(termFilter(\"field\", \"value4\"), factorFunction(2))\n+ .add(termFilter(\"field\", \"value1\"), factorFunction(3))\n+ .add(termFilter(\"color\", \"red\"), factorFunction(5))).setExplain(true).execute()\n+ .actionGet();\n \n assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));\n assertThat(searchResponse.getHits().totalHits(), equalTo(4l));", "filename": "src/test/java/org/elasticsearch/test/integration/search/customscore/CustomScoreSearchTests.java", "status": "modified" }, { "diff": "@@ -25,11 +25,11 @@\n import org.elasticsearch.action.search.SearchPhaseExecutionException;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.search.SearchType;\n+import org.elasticsearch.common.geo.GeoPoint;\n+import org.elasticsearch.common.lucene.search.function.CombineFunction;\n import org.elasticsearch.index.query.MatchAllFilterBuilder;\n import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;\n-import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionBuilder;\n import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;\n-import org.elasticsearch.index.query.functionscore.lin.LinearDecayFunctionBuilder;\n import org.elasticsearch.search.SearchHits;\n import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;\n import org.elasticsearch.test.integration.AbstractSharedClusterTest;\n@@ -43,9 +43,9 @@\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;\n import static org.elasticsearch.index.query.QueryBuilders.termQuery;\n+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.*;\n import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;\n-import static 
org.hamcrest.Matchers.equalTo;\n-import static org.hamcrest.Matchers.lessThan;\n+import static org.hamcrest.Matchers.*;\n \n public class DecayFunctionScoreTests extends AbstractSharedClusterTest {\n \n@@ -90,7 +90,6 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception {\n List<Float> lonlat = new ArrayList<Float>();\n lonlat.add(new Float(20));\n lonlat.add(new Float(11));\n- DecayFunctionBuilder fb = new GaussDecayFunctionBuilder(\"loc\", lonlat, \"1000km\");\n \n ActionFuture<SearchResponse> response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n@@ -101,15 +100,15 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception {\n \n response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n- searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(fb))));\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"loc\", lonlat, \"1000km\")))));\n sr = response.actionGet();\n sh = sr.getHits();\n assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));\n \n assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n assertThat(sh.getAt(1).getId(), equalTo(\"2\"));\n // Test Exp\n- fb = new ExponentialDecayFunctionBuilder(\"loc\", lonlat, \"1000km\");\n \n response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n@@ -120,15 +119,15 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception {\n \n response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n- searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(fb))));\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), linearDecayFunction(\"loc\", lonlat, \"1000km\")))));\n sr = response.actionGet();\n sh = sr.getHits();\n assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));\n \n assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n assertThat(sh.getAt(1).getId(), equalTo(\"2\"));\n // Test Lin\n- fb = new LinearDecayFunctionBuilder(\"loc\", lonlat, \"1000km\");\n \n response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n@@ -139,7 +138,8 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception {\n \n response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n- searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(fb))));\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), exponentialDecayFunction(\"loc\", lonlat, \"1000km\")))));\n sr = response.actionGet();\n sh = sr.getHits();\n assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));\n@@ -148,6 +148,277 @@ public void testDistanceScoreGeoLinGaussExp() throws Exception {\n assertThat(sh.getAt(1).getId(), equalTo(\"2\"));\n }\n \n+ @Test\n+ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception {\n+\n+ createIndexMapped(\"test\", \"type1\", \"test\", \"string\", \"num\", \"double\");\n+ ensureYellow();\n+\n+ // add tw docs within offset\n+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();\n+ indexBuilders.add(new IndexRequestBuilder(client()).setType(\"type1\").setId(\"1\").setIndex(\"test\")\n+ .setSource(jsonBuilder().startObject().field(\"test\", \"value\").field(\"num\", 0.5).endObject()));\n+ indexBuilders.add(new 
IndexRequestBuilder(client()).setType(\"type1\").setId(\"2\").setIndex(\"test\")\n+ .setSource(jsonBuilder().startObject().field(\"test\", \"value\").field(\"num\", 1.7).endObject()));\n+\n+ // add docs outside offset\n+ int numDummyDocs = 20;\n+ for (int i = 0; i < numDummyDocs; i++) {\n+ indexBuilders.add(new IndexRequestBuilder(client()).setType(\"type1\").setId(Integer.toString(i + 3)).setIndex(\"test\")\n+ .setSource(jsonBuilder().startObject().field(\"test\", \"value\").field(\"num\", 3.0 + i).endObject()));\n+ }\n+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);\n+\n+ indexRandom(\"test\", false, builders);\n+ refresh();\n+\n+ // Test Gauss\n+\n+ ActionFuture<SearchResponse> response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource()\n+ .explain(true)\n+ .size(numDummyDocs + 2)\n+ .query(functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num\", 1.0, 5.0).setOffset(1.0))\n+ .boostMode(CombineFunction.REPLACE.getName()))));\n+ SearchResponse sr = response.actionGet();\n+ SearchHits sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));\n+ assertThat(sh.getAt(0).getId(), anyOf(equalTo(\"1\"), equalTo(\"2\")));\n+ assertThat(sh.getAt(1).getId(), anyOf(equalTo(\"1\"), equalTo(\"2\")));\n+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));\n+ for (int i = 0; i < numDummyDocs; i++) {\n+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));\n+ }\n+\n+ // Test Exp\n+\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource()\n+ .explain(true)\n+ .size(numDummyDocs + 2)\n+ .query(functionScoreQuery(termQuery(\"test\", \"value\"),\n+ exponentialDecayFunction(\"num\", 1.0, 5.0).setOffset(1.0)).boostMode(\n+ CombineFunction.REPLACE.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));\n+ assertThat(sh.getAt(0).getId(), anyOf(equalTo(\"1\"), equalTo(\"2\")));\n+ assertThat(sh.getAt(1).getId(), anyOf(equalTo(\"1\"), equalTo(\"2\")));\n+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));\n+ for (int i = 0; i < numDummyDocs; i++) {\n+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));\n+ }\n+ // Test Lin\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource()\n+ .explain(true)\n+ .size(numDummyDocs + 2)\n+ .query(functionScoreQuery(termQuery(\"test\", \"value\"), linearDecayFunction(\"num\", 1.0, 20.0).setOffset(1.0))\n+ .boostMode(CombineFunction.REPLACE.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));\n+ assertThat(sh.getAt(0).getId(), anyOf(equalTo(\"1\"), equalTo(\"2\")));\n+ assertThat(sh.getAt(1).getId(), anyOf(equalTo(\"1\"), equalTo(\"2\")));\n+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));\n+ for (int i = 0; i < numDummyDocs; i++) {\n+ System.out.println(\"id \" + sh.getAt(i + 2).getId() + \" got score \" + sh.getAt(i + 2).score());\n+ }\n+ }\n+\n+ @Test\n+ public void testBoostModeSettingWorks() throws Exception {\n+\n+ createIndexMapped(\"test\", \"type1\", \"test\", \"string\", \"loc\", \"geo_point\");\n+ ensureYellow();\n+\n+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();\n+ indexBuilders.add(new IndexRequestBuilder(client())\n+ 
.setType(\"type1\")\n+ .setId(\"1\")\n+ .setIndex(\"test\")\n+ .setSource(\n+ jsonBuilder().startObject().field(\"test\", \"value\").startObject(\"loc\").field(\"lat\", 11).field(\"lon\", 21).endObject()\n+ .endObject()));\n+ indexBuilders.add(new IndexRequestBuilder(client())\n+ .setType(\"type1\")\n+ .setId(\"2\")\n+ .setIndex(\"test\")\n+ .setSource(\n+ jsonBuilder().startObject().field(\"test\", \"value value\").startObject(\"loc\").field(\"lat\", 11).field(\"lon\", 20)\n+ .endObject().endObject()));\n+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);\n+\n+ indexRandom(\"test\", false, builders);\n+ refresh();\n+\n+ // Test Gauss\n+ List<Float> lonlat = new ArrayList<Float>();\n+ lonlat.add(new Float(20));\n+ lonlat.add(new Float(11));\n+\n+ ActionFuture<SearchResponse> response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"loc\", lonlat, \"1000km\")).boostMode(\n+ CombineFunction.MULT.getName()))));\n+ SearchResponse sr = response.actionGet();\n+ SearchHits sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (2)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat(sh.getAt(1).getId(), equalTo(\"2\"));\n+\n+ // Test Exp\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"loc\", lonlat, \"1000km\")).boostMode(\n+ CombineFunction.REPLACE.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (2)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"2\"));\n+ assertThat(sh.getAt(1).getId(), equalTo(\"1\"));\n+\n+ }\n+\n+ @Test\n+ public void testParseGeoPoint() throws Exception {\n+\n+ createIndexMapped(\"test\", \"type1\", \"test\", \"string\", \"loc\", \"geo_point\");\n+ ensureYellow();\n+\n+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();\n+ indexBuilders.add(new IndexRequestBuilder(client())\n+ .setType(\"type1\")\n+ .setId(\"1\")\n+ .setIndex(\"test\")\n+ .setSource(\n+ jsonBuilder().startObject().field(\"test\", \"value\").startObject(\"loc\").field(\"lat\", 20).field(\"lon\", 11).endObject()\n+ .endObject()));\n+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);\n+\n+ indexRandom(\"test\", false, builders);\n+ refresh();\n+ GeoPoint point = new GeoPoint(20, 11);\n+ ActionFuture<SearchResponse> response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"loc\", point, \"1000km\")).boostMode(\n+ CombineFunction.MULT.getName()))));\n+ SearchResponse sr = response.actionGet();\n+ SearchHits sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));\n+ float[] coords = { 11, 20 };\n+\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"loc\", coords, \"1000km\")).boostMode(\n+ CombineFunction.MULT.getName()))));\n+ sr = 
response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));\n+ System.out.println();\n+ }\n+\n+ @Test\n+ public void testCombineModes() throws Exception {\n+\n+ createIndexMapped(\"test\", \"type1\", \"test\", \"string\", \"num\", \"double\");\n+ ensureYellow();\n+\n+ List<IndexRequestBuilder> indexBuilders = new ArrayList<IndexRequestBuilder>();\n+ indexBuilders.add(new IndexRequestBuilder(client()).setType(\"type1\").setId(\"1\").setIndex(\"test\")\n+ .setSource(jsonBuilder().startObject().field(\"test\", \"value\").field(\"num\", 1.0).endObject()));\n+ IndexRequestBuilder[] builders = indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]);\n+\n+ indexRandom(\"test\", false, builders);\n+ refresh();\n+\n+ // function score should return 0.5 for this function\n+\n+ ActionFuture<SearchResponse> response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num\", 0.0, 1.0).setDecay(0.5)).boost(\n+ 2.0f).boostMode(CombineFunction.MULT.getName()))));\n+ SearchResponse sr = response.actionGet();\n+ SearchHits sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());\n+\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num\", 0.0, 1.0).setDecay(0.5)).boost(\n+ 2.0f).boostMode(CombineFunction.REPLACE.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());\n+\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num\", 0.0, 1.0).setDecay(0.5)).boost(\n+ 2.0f).boostMode(CombineFunction.SUM.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282 + 0.5), 1.e-5));\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());\n+\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num\", 0.0, 1.0).setDecay(0.5)).boost(\n+ 2.0f).boostMode(CombineFunction.AVG.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo((0.30685282 + 0.5), 1.e-5));\n+ 
logger.info(\"--> Hit[0] {} Explanation:\\n {}\", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());\n+\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num\", 0.0, 1.0).setDecay(0.5)).boost(\n+ 2.0f).boostMode(CombineFunction.MIN.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282), 1.e-5));\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());\n+\n+ response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num\", 0.0, 1.0).setDecay(0.5)).boost(\n+ 2.0f).boostMode(CombineFunction.MAX.getName()))));\n+ sr = response.actionGet();\n+ sh = sr.getHits();\n+ assertThat(sh.getTotalHits(), equalTo((long) (1)));\n+ assertThat(sh.getAt(0).getId(), equalTo(\"1\"));\n+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));\n+ logger.info(\"--> Hit[0] {} Explanation:\\n {}\", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());\n+\n+ }\n+\n @Test(expected = SearchPhaseExecutionException.class)\n public void testExceptionThrownIfScaleLE0() throws Exception {\n \n@@ -161,11 +432,10 @@ public void testExceptionThrownIfScaleLE0() throws Exception {\n .source(jsonBuilder().startObject().field(\"test\", \"value\").field(\"num1\", \"2013-05-28\").endObject())).actionGet();\n refresh();\n \n- DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder(\"num1\", \"2013-05-28\", \"-1d\");\n-\n ActionFuture<SearchResponse> response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n- searchSource().explain(true).query(functionScoreQuery(termQuery(\"test\", \"value\")).add(gfb))));\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"), gaussDecayFunction(\"num1\", \"2013-05-28\", \"-1d\")))));\n \n SearchResponse sr = response.actionGet();\n ElasticsearchAssertions.assertNoFailures(sr);\n@@ -179,7 +449,7 @@ public void testExceptionThrownIfScaleLE0() throws Exception {\n @Test(expected = ElasticSearchIllegalStateException.class)\n public void testExceptionThrownIfScaleRefNotBetween0And1() throws Exception {\n \n- DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder(\"num1\", \"2013-05-28\", \"1d\").setScaleWeight(100);\n+ DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder(\"num1\", \"2013-05-28\", \"1d\").setDecay(100);\n \n }\n \n@@ -209,14 +479,12 @@ public void testValueMissingLin() throws Exception {\n \n refresh();\n \n- DecayFunctionBuilder gfb1 = new LinearDecayFunctionBuilder(\"num1\", \"2013-05-28\", \"+3d\");\n- DecayFunctionBuilder gfb2 = new LinearDecayFunctionBuilder(\"num2\", \"0.0\", \"1\");\n-\n ActionFuture<SearchResponse> response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n- searchSource().explain(true).query(\n- functionScoreQuery(termQuery(\"test\", \"value\")).add(new MatchAllFilterBuilder(), gfb1)\n- .add(new MatchAllFilterBuilder(), gfb2).scoreMode(\"multiply\"))));\n+ searchSource().explain(false).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\"))\n+ .add(new 
MatchAllFilterBuilder(), linearDecayFunction(\"num1\", \"2013-05-28\", \"+3d\"))\n+ .add(new MatchAllFilterBuilder(), linearDecayFunction(\"num2\", \"0.0\", \"1\")).scoreMode(\"multiply\"))));\n \n SearchResponse sr = response.actionGet();\n ElasticsearchAssertions.assertNoFailures(sr);\n@@ -258,22 +526,22 @@ public void testManyDocsLin() throws Exception {\n \n indexRandom(\"test\", false, builders);\n refresh();\n- \n+\n List<Float> lonlat = new ArrayList<Float>();\n lonlat.add(new Float(100));\n lonlat.add(new Float(110));\n- DecayFunctionBuilder gfb1 = new LinearDecayFunctionBuilder(\"date\", \"2013-05-30\", \"+15d\");\n- DecayFunctionBuilder gfb2 = new LinearDecayFunctionBuilder(\"geo\", lonlat, \"1000km\");\n- DecayFunctionBuilder gfb3 = new LinearDecayFunctionBuilder(\"num\", Integer.toString(numDocs), Integer.toString(numDocs / 2));\n \n ActionFuture<SearchResponse> response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n searchSource()\n .explain(true)\n .size(numDocs)\n- .query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new MatchAllFilterBuilder(), gfb1)\n- .add(new MatchAllFilterBuilder(), gfb2).add(new MatchAllFilterBuilder(), gfb3)\n- .scoreMode(\"multiply\"))));\n+ .query(functionScoreQuery(termQuery(\"test\", \"value\"))\n+ .add(new MatchAllFilterBuilder(), linearDecayFunction(\"date\", \"2013-05-30\", \"+15d\"))\n+ .add(new MatchAllFilterBuilder(), linearDecayFunction(\"geo\", lonlat, \"1000km\"))\n+ .add(new MatchAllFilterBuilder(),\n+ linearDecayFunction(\"num\", Integer.toString(numDocs), Integer.toString(numDocs / 2)))\n+ .scoreMode(\"multiply\").boostMode(CombineFunction.REPLACE.getName()))));\n \n SearchResponse sr = response.actionGet();\n ElasticsearchAssertions.assertNoFailures(sr);\n@@ -285,6 +553,7 @@ public void testManyDocsLin() throws Exception {\n }\n for (int i = 0; i < numDocs - 1; i++) {\n assertThat(scores[i], lessThan(scores[i + 1]));\n+\n }\n \n }\n@@ -303,14 +572,13 @@ public void testParsingExceptionIfFieldDoesNotExist() throws Exception {\n List<Float> lonlat = new ArrayList<Float>();\n lonlat.add(new Float(100));\n lonlat.add(new Float(110));\n- DecayFunctionBuilder gfb2 = new LinearDecayFunctionBuilder(\"type1.geo\", lonlat, \"1000km\");\n ActionFuture<SearchResponse> response = client().search(\n searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n searchSource()\n .explain(true)\n .size(numDocs)\n- .query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new MatchAllFilterBuilder(), gfb2).scoreMode(\n- \"multiply\"))));\n+ .query(functionScoreQuery(termQuery(\"test\", \"value\")).add(new MatchAllFilterBuilder(),\n+ linearDecayFunction(\"type1.geo\", lonlat, \"1000km\")).scoreMode(\"multiply\"))));\n SearchResponse sr = response.actionGet();\n \n }\n@@ -323,13 +591,12 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception {\n indexRequest(\"test\").type(\"type\").source(\n jsonBuilder().startObject().field(\"test\", \"value\").field(\"num\", Integer.toString(1)).endObject())).actionGet();\n refresh();\n- DecayFunctionBuilder lfb = new LinearDecayFunctionBuilder(\"num\", Integer.toString(1), Integer.toString(1 / 2));\n // so, we indexed a string field, but now we try to score a num field\n- ActionFuture<SearchResponse> response = client()\n- .search(searchRequest()\n- .searchType(SearchType.QUERY_THEN_FETCH)\n- .source(searchSource().explain(true).query(\n- functionScoreQuery(termQuery(\"test\", \"value\")).add(new MatchAllFilterBuilder(), 
lfb).scoreMode(\"multiply\"))));\n+ ActionFuture<SearchResponse> response = client().search(\n+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(\n+ searchSource().explain(true).query(\n+ functionScoreQuery(termQuery(\"test\", \"value\")).add(new MatchAllFilterBuilder(),\n+ linearDecayFunction(\"num\", Integer.toString(1), Integer.toString(1 / 2))).scoreMode(\"multiply\"))));\n response.actionGet();\n }\n ", "filename": "src/test/java/org/elasticsearch/test/integration/search/functionscore/DecayFunctionScoreTests.java", "status": "modified" }, { "diff": "@@ -118,18 +118,18 @@ public void onModule(FunctionScoreModule scoreModule) {\n \n public static class CustomDistanceScoreParser extends DecayFunctionParser {\n \n- public static final String[] NAMES = {\"linear_mult\", \"linearMult\"};\n+ public static final String[] NAMES = { \"linear_mult\", \"linearMult\" };\n \n @Override\n public String[] getNames() {\n return NAMES;\n }\n \n- static final DecayFunction distanceFunction = new LinearMultScoreFunction();\n+ static final DecayFunction decayFunction = new LinearMultScoreFunction();\n \n @Override\n public DecayFunction getDecayFunction() {\n- return distanceFunction;\n+ return decayFunction;\n }\n \n static class LinearMultScoreFunction implements DecayFunction {\n@@ -138,7 +138,8 @@ static class LinearMultScoreFunction implements DecayFunction {\n \n @Override\n public double evaluate(double value, double scale) {\n- return Math.abs(value);\n+ \n+ return value;\n }\n \n @Override\n@@ -155,12 +156,10 @@ public double processScale(double userGivenScale, double userGivenValue) {\n }\n }\n \n-\n public class CustomDistanceScoreBuilder extends DecayFunctionBuilder {\n \n-\n- public CustomDistanceScoreBuilder(String fieldName, Object reference, Object scale) {\n- super(fieldName, reference, scale);\n+ public CustomDistanceScoreBuilder(String fieldName, Object origin, Object scale) {\n+ super(fieldName, origin, scale);\n }\n \n @Override", "filename": "src/test/java/org/elasticsearch/test/integration/search/functionscore/FunctionScorePluginTests.java", "status": "modified" }, { "diff": "@@ -21,7 +21,6 @@\n \n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.common.settings.ImmutableSettings;\n-import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionBuilder;\n import org.elasticsearch.search.SearchHits;\n import org.elasticsearch.test.integration.AbstractSharedClusterTest;\n import org.hamcrest.CoreMatchers;\n@@ -33,6 +32,7 @@\n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;\n import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;\n+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.nullValue;\n@@ -63,7 +63,7 @@ public void consistentHitsWithSameSeed() throws Exception {\n for (int i = 0; i < innerIters; i++) {\n SearchResponse searchResponse = client().prepareSearch()\n .setPreference(preference)\n- .setQuery(functionScoreQuery(matchAllQuery()).add(new RandomScoreFunctionBuilder().seed(seed)))\n+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(seed)))\n .execute().actionGet();\n assertThat(\"Failures \" + Arrays.toString(searchResponse.getShardFailures()), 
searchResponse.getShardFailures().length, CoreMatchers.equalTo(0));\n int hitCount = searchResponse.getHits().getHits().length;\n@@ -99,7 +99,7 @@ public void distribution() throws Exception {\n for (int i = 0; i < count; i++) {\n \n SearchResponse searchResponse = client().prepareSearch()\n- .setQuery(functionScoreQuery(matchAllQuery()).add(new RandomScoreFunctionBuilder().seed(System.nanoTime())))\n+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(System.nanoTime())))\n .execute().actionGet();\n \n matrix[Integer.valueOf(searchResponse.getHits().getAt(0).id())]++;", "filename": "src/test/java/org/elasticsearch/test/integration/search/functionscore/RandomScoreFunctionTests.java", "status": "modified" }, { "diff": "@@ -24,13 +24,13 @@\n import org.apache.lucene.util.English;\n import org.elasticsearch.action.search.SearchResponse;\n import org.elasticsearch.action.search.SearchType;\n+import org.elasticsearch.common.lucene.search.function.CombineFunction;\n import org.elasticsearch.common.settings.ImmutableSettings;\n import org.elasticsearch.common.settings.ImmutableSettings.Builder;\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.query.MatchQueryBuilder;\n import org.elasticsearch.index.query.QueryBuilders;\n-import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;\n import org.elasticsearch.search.SearchHit;\n import org.elasticsearch.search.SearchHits;\n import org.elasticsearch.search.rescore.RescoreBuilder;\n@@ -39,6 +39,7 @@\n import org.junit.Test;\n \n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;\n import static org.hamcrest.Matchers.equalTo;\n import static org.hamcrest.Matchers.notNullValue;\n@@ -481,9 +482,9 @@ public void testScoring_withFunctionScore() throws Exception {\n .queryRescorer(\n QueryBuilders.boolQuery()\n .disableCoord(true)\n- .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[0])).add(new ScriptScoreFunctionBuilder().script(\"5.0f\")))\n- .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[1])).add(new ScriptScoreFunctionBuilder().script(\"7.0f\")))\n- .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[3])).add(new ScriptScoreFunctionBuilder().script(\"0.0f\"))))\n+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[0]), scriptFunction(\"5.0f\")).boostMode(CombineFunction.REPLACE.getName()))\n+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[1]), scriptFunction(\"7.0f\")).boostMode(CombineFunction.REPLACE.getName()))\n+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[3]), scriptFunction(\"0.0f\")).boostMode(CombineFunction.REPLACE.getName())))\n .setQueryWeight(primaryWeight)\n .setRescoreQueryWeight(secondaryWeight);\n \n@@ -496,10 +497,10 @@ public void testScoring_withFunctionScore() throws Exception {\n .setPreference(\"test\") // ensure we hit the same shards for tie-breaking\n .setQuery(QueryBuilders.boolQuery()\n .disableCoord(true)\n- .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[0])).add(new 
ScriptScoreFunctionBuilder().script(\"2.0f\")))\n- .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[1])).add(new ScriptScoreFunctionBuilder().script(\"3.0f\")))\n- .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[2])).add(new ScriptScoreFunctionBuilder().script(\"5.0f\")))\n- .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[3])).add(new ScriptScoreFunctionBuilder().script(\"0.2f\"))))\n+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[0]), scriptFunction(\"2.0f\")).boostMode(CombineFunction.REPLACE.getName()))\n+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[1]), scriptFunction(\"3.0f\")).boostMode(CombineFunction.REPLACE.getName()))\n+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[2]), scriptFunction(\"5.0f\")).boostMode(CombineFunction.REPLACE.getName()))\n+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery(\"field1\", intToEnglish[3]), scriptFunction(\"0.2f\")).boostMode(CombineFunction.REPLACE.getName())))\n .setFrom(0)\n .setSize(10)\n .setRescorer(rescoreQuery)", "filename": "src/test/java/org/elasticsearch/test/integration/search/rescore/QueryRescorerTests.java", "status": "modified" }, { "diff": "@@ -34,7 +34,6 @@\n import org.elasticsearch.common.xcontent.XContentBuilder;\n import org.elasticsearch.common.xcontent.XContentFactory;\n import org.elasticsearch.index.query.FilterBuilders;\n-import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;\n import org.elasticsearch.search.SearchHit;\n import org.elasticsearch.search.sort.ScriptSortBuilder;\n import org.elasticsearch.search.sort.SortBuilders;\n@@ -50,6 +49,7 @@\n \n import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;\n import static org.elasticsearch.index.query.QueryBuilders.*;\n+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;\n import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;\n import static org.hamcrest.Matchers.*;\n \n@@ -279,21 +279,21 @@ public void testScoreSortDirection_withFunctionScore() throws Exception {\n \n refresh();\n \n- SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"_source.field\"))).execute().actionGet();\n+ SearchResponse searchResponse = client().prepareSearch(\"test\").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction(\"_source.field\"))).execute().actionGet();\n assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(\"1\"));\n assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));\n assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));\n assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(\"3\"));\n \n- searchResponse = client().prepareSearch(\"test\").setQuery(functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"_source.field\"))).addSort(\"_score\", SortOrder.DESC).execute().actionGet();\n+ searchResponse = client().prepareSearch(\"test\").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction(\"_source.field\"))).addSort(\"_score\", 
SortOrder.DESC).execute().actionGet();\n assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(\"1\"));\n assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));\n assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));\n assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(\"3\"));\n \n- searchResponse = client().prepareSearch(\"test\").setQuery(functionScoreQuery(matchAllQuery()).add(new ScriptScoreFunctionBuilder().script(\"_source.field\"))).addSort(\"_score\", SortOrder.DESC).execute().actionGet();\n+ searchResponse = client().prepareSearch(\"test\").setQuery(functionScoreQuery(matchAllQuery(), scriptFunction(\"_source.field\"))).addSort(\"_score\", SortOrder.DESC).execute().actionGet();\n assertThat(searchResponse.getHits().getAt(2).getId(), equalTo(\"3\"));\n assertThat(searchResponse.getHits().getAt(1).getId(), equalTo(\"2\"));\n assertThat(searchResponse.getHits().getAt(0).getId(), equalTo(\"1\"));", "filename": "src/test/java/org/elasticsearch/test/integration/search/sort/SimpleSortTests.java", "status": "modified" } ] }
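Note (editor's sketch, not part of the PR): the test diffs above consistently migrate from the builder-per-function style (`new ScriptScoreFunctionBuilder().script(...)`, `new RandomScoreFunctionBuilder().seed(...)`) to the static factories in `org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders` combined with the two-argument `functionScoreQuery(query, function)` form, plus an explicit `boostMode(CombineFunction.REPLACE.getName())` where the script value should replace the query score. A minimal sketch of that call pattern follows; the index name "test", the field "test" with value "value", and the `client()` helper are taken from the test context above and are illustrative only.

import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.lucene.search.function.CombineFunction;

// Score matching documents by a constant script value and replace the query score with it,
// mirroring the rescore-test usage above.
SearchResponse scripted = client().prepareSearch("test")
        .setQuery(functionScoreQuery(termQuery("test", "value"), scriptFunction("5.0f"))
                .boostMode(CombineFunction.REPLACE.getName()))
        .execute().actionGet();

// Seeded random scoring: reusing the same seed yields a reproducible hit order,
// as exercised in RandomScoreFunctionTests above.
SearchResponse random = client().prepareSearch("test")
        .setQuery(functionScoreQuery(termQuery("test", "value"), randomFunction(42L)))
        .execute().actionGet();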