Dataset columns:
Unnamed: 0: int64 row index, values 0 to 6.45k
func: string (flattened Java source), lengths 37 to 143k characters
target: class label, 2 classes (0 = true, 1 = no label)
project: string (source file path), lengths 33 to 157 characters
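Read as one record per row, the schema above can be restated as a small sketch. The record below is purely illustrative: the name FunctionRow and its field names are assumptions for readability, not part of the dataset or any tooling.

// Hypothetical sketch only: models one row of this dump as a plain Java record.
// Field names and the class name are invented; the ranges in the comments come
// from the column statistics above.
public record FunctionRow(
        int index,      // "Unnamed: 0": integer row index, 0 to ~6,450
        String func,    // flattened Java source of one function or class, 37 to ~143,000 chars
        int target,     // class label: 0 = "true", 1 = "no label"
        String project  // repository-relative source file path, 33 to 157 chars
) { }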
191
public interface TransferQueue<E> extends BlockingQueue<E> { /** * Transfers the element to a waiting consumer immediately, if possible. * * <p>More precisely, transfers the specified element immediately * if there exists a consumer already waiting to receive it (in * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), * otherwise returning {@code false} without enqueuing the element. * * @param e the element to transfer * @return {@code true} if the element was transferred, else * {@code false} * @throws ClassCastException if the class of the specified element * prevents it from being added to this queue * @throws NullPointerException if the specified element is null * @throws IllegalArgumentException if some property of the specified * element prevents it from being added to this queue */ boolean tryTransfer(E e); /** * Transfers the element to a consumer, waiting if necessary to do so. * * <p>More precisely, transfers the specified element immediately * if there exists a consumer already waiting to receive it (in * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), * else waits until the element is received by a consumer. * * @param e the element to transfer * @throws InterruptedException if interrupted while waiting, * in which case the element is not left enqueued * @throws ClassCastException if the class of the specified element * prevents it from being added to this queue * @throws NullPointerException if the specified element is null * @throws IllegalArgumentException if some property of the specified * element prevents it from being added to this queue */ void transfer(E e) throws InterruptedException; /** * Transfers the element to a consumer if it is possible to do so * before the timeout elapses. * * <p>More precisely, transfers the specified element immediately * if there exists a consumer already waiting to receive it (in * {@link #take} or timed {@link #poll(long,TimeUnit) poll}), * else waits until the element is received by a consumer, * returning {@code false} if the specified wait time elapses * before the element can be transferred. * * @param e the element to transfer * @param timeout how long to wait before giving up, in units of * {@code unit} * @param unit a {@code TimeUnit} determining how to interpret the * {@code timeout} parameter * @return {@code true} if successful, or {@code false} if * the specified waiting time elapses before completion, * in which case the element is not left enqueued * @throws InterruptedException if interrupted while waiting, * in which case the element is not left enqueued * @throws ClassCastException if the class of the specified element * prevents it from being added to this queue * @throws NullPointerException if the specified element is null * @throws IllegalArgumentException if some property of the specified * element prevents it from being added to this queue */ boolean tryTransfer(E e, long timeout, TimeUnit unit) throws InterruptedException; /** * Returns {@code true} if there is at least one consumer waiting * to receive an element via {@link #take} or * timed {@link #poll(long,TimeUnit) poll}. * The return value represents a momentary state of affairs. * * @return {@code true} if there is at least one waiting consumer */ boolean hasWaitingConsumer(); /** * Returns an estimate of the number of consumers waiting to * receive elements via {@link #take} or timed * {@link #poll(long,TimeUnit) poll}. 
The return value is an * approximation of a momentary state of affairs, that may be * inaccurate if consumers have completed or given up waiting. * The value may be useful for monitoring and heuristics, but * not for synchronization control. Implementations of this * method are likely to be noticeably slower than those for * {@link #hasWaitingConsumer}. * * @return the number of consumers waiting to receive elements */ int getWaitingConsumerCount(); }
0 (true)
src_main_java_jsr166y_TransferQueue.java
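Side note on the row above: its func value is the JSR-166 TransferQueue interface, whose javadoc spells out the producer-to-consumer handoff semantics of transfer and tryTransfer. A minimal usage sketch against java.util.concurrent.LinkedTransferQueue, the JDK's standard implementation of this interface, is given below; the class name TransferQueueDemo and the printed strings are illustrative only.

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TransferQueue;

public class TransferQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        TransferQueue<String> queue = new LinkedTransferQueue<>();

        // No consumer is waiting yet, so tryTransfer returns false and nothing is enqueued.
        System.out.println("tryTransfer with no waiting consumer: " + queue.tryTransfer("ping"));

        Thread consumer = new Thread(() -> {
            try {
                System.out.println("consumer received: " + queue.take());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        // transfer blocks until the consumer thread above has taken the element.
        queue.transfer("pong");
        consumer.join();
    }
}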
3,115
public class AddListenerRequest extends CallableClientRequest implements SecureRequest, RetryableRequest { private String name; private boolean includeValue; public AddListenerRequest() { } public AddListenerRequest(String name, boolean includeValue) { this.name = name; this.includeValue = includeValue; } @Override public String getServiceName() { return QueueService.SERVICE_NAME; } @Override public int getFactoryId() { return QueuePortableHook.F_ID; } @Override public int getClassId() { return QueuePortableHook.ADD_LISTENER; } @Override public void write(PortableWriter writer) throws IOException { writer.writeUTF("n", name); writer.writeBoolean("i", includeValue); } @Override public void read(PortableReader reader) throws IOException { name = reader.readUTF("n"); includeValue = reader.readBoolean("i"); } @Override public Object call() throws Exception { final ClientEndpoint endpoint = getEndpoint(); final ClientEngine clientEngine = getClientEngine(); final QueueService service = getService(); ItemListener listener = new ItemListener() { @Override public void itemAdded(ItemEvent item) { send(item); } @Override public void itemRemoved(ItemEvent item) { send(item); } private void send(ItemEvent event) { if (endpoint.live()) { Data item = clientEngine.toData(event.getItem()); PortableItemEvent portableItemEvent = new PortableItemEvent( item, event.getEventType(), event.getMember().getUuid()); endpoint.sendEvent(portableItemEvent, getCallId()); } } }; String registrationId = service.addItemListener(name, listener, includeValue); endpoint.setListenerRegistration(QueueService.SERVICE_NAME, name, registrationId); return registrationId; } @Override public Permission getRequiredPermission() { return new QueuePermission(name, ActionConstants.ACTION_LISTEN); } }
1 (no label)
hazelcast_src_main_java_com_hazelcast_queue_client_AddListenerRequest.java
624
indexEngine.getEntriesMajor(iRangeFrom, isInclusive, null, new OIndexEngine.EntriesResultListener() { @Override public boolean addResult(ODocument entry) { return entriesResultListener.addResult(entry); } });
1 (no label)
core_src_main_java_com_orientechnologies_orient_core_index_OIndexOneValue.java
314
public class TransportClusterHealthAction extends TransportMasterNodeReadOperationAction<ClusterHealthRequest, ClusterHealthResponse> { private final ClusterName clusterName; @Inject public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ClusterName clusterName) { super(settings, transportService, clusterService, threadPool); this.clusterName = clusterName; } @Override protected String executor() { // we block here... return ThreadPool.Names.GENERIC; } @Override protected String transportAction() { return ClusterHealthAction.NAME; } @Override protected ClusterHealthRequest newRequest() { return new ClusterHealthRequest(); } @Override protected ClusterHealthResponse newResponse() { return new ClusterHealthResponse(); } @Override protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) throws ElasticsearchException { long endTime = System.currentTimeMillis() + request.timeout().millis(); if (request.waitForEvents() != null) { final CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { latch.countDown(); } @Override public void onFailure(String source, Throwable t) { logger.error("unexpected failure during [{}]", t, source); } }); try { latch.await(request.timeout().millis(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { // ignore } } int waitFor = 5; if (request.waitForStatus() == null) { waitFor--; } if (request.waitForRelocatingShards() == -1) { waitFor--; } if (request.waitForActiveShards() == -1) { waitFor--; } if (request.waitForNodes().isEmpty()) { waitFor--; } if (request.indices().length == 0) { // check that they actually exists in the meta data waitFor--; } if (waitFor == 0) { // no need to wait for anything ClusterState clusterState = clusterService.state(); listener.onResponse(clusterHealth(request, clusterState)); return; } while (true) { int waitForCounter = 0; ClusterState clusterState = clusterService.state(); ClusterHealthResponse response = clusterHealth(request, clusterState); if (request.waitForStatus() != null && response.getStatus().value() <= request.waitForStatus().value()) { waitForCounter++; } if (request.waitForRelocatingShards() != -1 && response.getRelocatingShards() <= request.waitForRelocatingShards()) { waitForCounter++; } if (request.waitForActiveShards() != -1 && response.getActiveShards() >= request.waitForActiveShards()) { waitForCounter++; } if (request.indices().length > 0) { try { clusterState.metaData().concreteIndices(request.indices()); waitForCounter++; } catch (IndexMissingException e) { response.status = ClusterHealthStatus.RED; // no indices, make sure its RED // missing indices, wait a bit more... 
} } if (!request.waitForNodes().isEmpty()) { if (request.waitForNodes().startsWith(">=")) { int expected = Integer.parseInt(request.waitForNodes().substring(2)); if (response.getNumberOfNodes() >= expected) { waitForCounter++; } } else if (request.waitForNodes().startsWith("ge(")) { int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1)); if (response.getNumberOfNodes() >= expected) { waitForCounter++; } } else if (request.waitForNodes().startsWith("<=")) { int expected = Integer.parseInt(request.waitForNodes().substring(2)); if (response.getNumberOfNodes() <= expected) { waitForCounter++; } } else if (request.waitForNodes().startsWith("le(")) { int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1)); if (response.getNumberOfNodes() <= expected) { waitForCounter++; } } else if (request.waitForNodes().startsWith(">")) { int expected = Integer.parseInt(request.waitForNodes().substring(1)); if (response.getNumberOfNodes() > expected) { waitForCounter++; } } else if (request.waitForNodes().startsWith("gt(")) { int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1)); if (response.getNumberOfNodes() > expected) { waitForCounter++; } } else if (request.waitForNodes().startsWith("<")) { int expected = Integer.parseInt(request.waitForNodes().substring(1)); if (response.getNumberOfNodes() < expected) { waitForCounter++; } } else if (request.waitForNodes().startsWith("lt(")) { int expected = Integer.parseInt(request.waitForNodes().substring(3, request.waitForNodes().length() - 1)); if (response.getNumberOfNodes() < expected) { waitForCounter++; } } else { int expected = Integer.parseInt(request.waitForNodes()); if (response.getNumberOfNodes() == expected) { waitForCounter++; } } } if (waitForCounter == waitFor) { listener.onResponse(response); return; } if (System.currentTimeMillis() > endTime) { response.timedOut = true; listener.onResponse(response); return; } try { Thread.sleep(200); } catch (InterruptedException e) { response.timedOut = true; listener.onResponse(response); return; } } } private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState) { if (logger.isTraceEnabled()) { logger.trace("Calculating health based on state version [{}]", clusterState.version()); } String[] concreteIndices; try { concreteIndices = clusterState.metaData().concreteIndicesIgnoreMissing(request.indices()); } catch (IndexMissingException e) { // one of the specified indices is not there - treat it as RED. ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState); response.status = ClusterHealthStatus.RED; return response; } return new ClusterHealthResponse(clusterName.value(), concreteIndices, clusterState); } }
0 (true)
src_main_java_org_elasticsearch_action_admin_cluster_health_TransportClusterHealthAction.java
225
public class RuntimeEnvironmentPropertiesManagerTest extends BaseTest { @Resource(name = "blConfigurationManager") RuntimeEnvironmentPropertiesManager configurationManager; @Test public void testPropertyOnly() throws Exception { String s = configurationManager.getProperty("detect.sequence.generator.inconsistencies"); if(s.indexOf("$")>=0) { Assert.fail("RuntimeEnvironmentPropertiesManager bean not defined"); } } @Test(dependsOnMethods={"testPropertyOnly"}) public void testPrefix() throws Exception { configurationManager.setPrefix("detect"); String s = configurationManager.getProperty("sequence.generator.inconsistencies"); if(s.indexOf("$")>=0) { Assert.fail("RuntimeEnvironmentPropertiesManager bean not defined"); } } @Test(dependsOnMethods={"testPrefix"}) public void testSuffix() throws Exception { String s = configurationManager.getProperty("sequence.generator","inconsistencies"); if(s.indexOf("$")>=0) { Assert.fail("RuntimeEnvironmentPropertiesManager bean not defined"); } } @Test(dependsOnMethods={"testSuffix"}) public void testNullSuffix() throws Exception { configurationManager.setPrefix("detect"); String s = configurationManager.getProperty("sequence.generator.inconsistencies", "SOMETHING"); Assert.assertNotNull(s); } @Test public void testNULL() throws Exception { String s = configurationManager.getProperty(null, "SOMETHING"); Assert.assertEquals(s, null); } }
0 (true)
integration_src_test_java_org_broadleafcommerce_common_config_RuntimeEnvironmentPropertiesManagerTest.java
6,274
public class MatchAssertion extends Assertion { private static final ESLogger logger = Loggers.getLogger(MatchAssertion.class); public MatchAssertion(String field, Object expectedValue) { super(field, expectedValue); } @Override protected void doAssert(Object actualValue, Object expectedValue) { //if the value is wrapped into / it is a regexp (e.g. /s+d+/) if (expectedValue instanceof String) { String expValue = ((String) expectedValue).trim(); if (expValue.length() > 2 && expValue.startsWith("/") && expValue.endsWith("/")) { String regex = expValue.substring(1, expValue.length() - 1); logger.trace("assert that [{}] matches [{}]", actualValue, regex); assertThat("field [" + getField() + "] was expected to match the provided regex but didn't", actualValue.toString(), matches(regex, Pattern.COMMENTS)); return; } } assertThat(errorMessage(), actualValue, notNullValue()); logger.trace("assert that [{}] matches [{}]", actualValue, expectedValue); if (!actualValue.getClass().equals(expectedValue.getClass())) { if (actualValue instanceof Number && expectedValue instanceof Number) { //Double 1.0 is equal to Integer 1 assertThat(errorMessage(), ((Number) actualValue).doubleValue(), equalTo(((Number) expectedValue).doubleValue())); return; } } assertThat(errorMessage(), actualValue, equalTo(expectedValue)); } private String errorMessage() { return "field [" + getField() + "] doesn't match the expected value"; } }
1 (no label)
src_test_java_org_elasticsearch_test_rest_section_MatchAssertion.java
95
static final class ReservationNode<K,V> extends Node<K,V> { ReservationNode() { super(RESERVED, null, null, null); } Node<K,V> find(int h, Object k) { return null; } }
0 (true)
src_main_java_jsr166e_ConcurrentHashMapV8.java
115
{ @Override public Object doWork( Void state ) { try { tm.commit(); } catch ( Exception e ) { throw new RuntimeException( e ); } return null; } };
0 (true)
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_TestJtaCompliance.java
570
public class OpenIndexRequest extends AcknowledgedRequest<OpenIndexRequest> { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, false, true); OpenIndexRequest() { } /** * Constructs a new open index request for the specified index. */ public OpenIndexRequest(String... indices) { this.indices = indices; } @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; if (indices == null || indices.length == 0) { validationException = addValidationError("index is missing", validationException); } return validationException; } /** * The indices to be opened * @return the indices to be opened */ String[] indices() { return indices; } /** * Sets the indices to be opened * @param indices the indices to be opened * @return the request itself */ public OpenIndexRequest indices(String... indices) { this.indices = indices; return this; } /** * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. * For example indices that don't exist. * * @return the current behaviour when it comes to index names and wildcard indices expressions */ public IndicesOptions indicesOptions() { return indicesOptions; } /** * Specifies what type of requested indices to ignore and how to deal with wildcard expressions. * For example indices that don't exist. * * @param indicesOptions the desired behaviour regarding indices to ignore and wildcard indices expressions * @return the request itself */ public OpenIndexRequest indicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; return this; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); indices = in.readStringArray(); readTimeout(in); indicesOptions = IndicesOptions.readIndicesOptions(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); writeTimeout(out); indicesOptions.writeIndicesOptions(out); } }
0 (true)
src_main_java_org_elasticsearch_action_admin_indices_open_OpenIndexRequest.java
2,079
public class PartitionWideEntryBackupOperation extends AbstractMapOperation implements BackupOperation, PartitionAwareOperation { EntryBackupProcessor entryProcessor; public PartitionWideEntryBackupOperation(String name, EntryBackupProcessor entryProcessor) { super(name); this.entryProcessor = entryProcessor; } public PartitionWideEntryBackupOperation() { } public void run() { Map.Entry entry; RecordStore recordStore = mapService.getRecordStore(getPartitionId(), name); Map<Data, Record> records = recordStore.getReadonlyRecordMap(); for (Map.Entry<Data, Record> recordEntry : records.entrySet()) { Data dataKey = recordEntry.getKey(); Record record = recordEntry.getValue(); Object objectKey = mapService.toObject(record.getKey()); Object valueBeforeProcess = mapService.toObject(record.getValue()); if (getPredicate() != null) { QueryEntry queryEntry = new QueryEntry(getNodeEngine().getSerializationService(), dataKey, objectKey, valueBeforeProcess); if (!getPredicate().apply(queryEntry)) { continue; } } entry = new AbstractMap.SimpleEntry(objectKey, valueBeforeProcess); entryProcessor.processBackup(entry); if (entry.getValue() == null){ recordStore.removeBackup(dataKey); } else { recordStore.putBackup(dataKey, entry.getValue()); } } } @Override public boolean returnsResponse() { return true; } protected Predicate getPredicate() { return null; } @Override protected void readInternal(ObjectDataInput in) throws IOException { super.readInternal(in); entryProcessor = in.readObject(); } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { super.writeInternal(out); out.writeObject(entryProcessor); } @Override public Object getResponse() { return true; } @Override public String toString() { return "PartitionWideEntryBackupOperation{}"; } }
1 (no label)
hazelcast_src_main_java_com_hazelcast_map_operation_PartitionWideEntryBackupOperation.java
380
@Entity @Inheritance(strategy = InheritanceType.JOINED) @Table(name = "BLC_LOCALE") @Cache(usage= CacheConcurrencyStrategy.NONSTRICT_READ_WRITE, region="blCMSElements") @AdminPresentationClass(friendlyName = "LocaleImpl_baseLocale") public class LocaleImpl implements Locale { private static final long serialVersionUID = 1L; @Id @Column (name = "LOCALE_CODE") @AdminPresentation(friendlyName = "LocaleImpl_Locale_Code", order = 1, group = "LocaleImpl_Details", prominent = true, gridOrder = 2) protected String localeCode; @Column (name = "FRIENDLY_NAME") @AdminPresentation(friendlyName = "LocaleImpl_Name", order = 2, group = "LocaleImpl_Details", prominent = true, gridOrder = 1) protected String friendlyName; @Column (name = "DEFAULT_FLAG") @AdminPresentation(friendlyName = "LocaleImpl_Is_Default", order = 3, group = "LocaleImpl_Details", prominent = true, gridOrder = 3) protected Boolean defaultFlag = false; @ManyToOne(targetEntity = BroadleafCurrencyImpl.class) @JoinColumn(name = "CURRENCY_CODE") @AdminPresentation(friendlyName = "LocaleImpl_Currency", order = 4, group = "LocaleImpl_Details", prominent = true) protected BroadleafCurrency defaultCurrency; @Column (name = "USE_IN_SEARCH_INDEX") @AdminPresentation(friendlyName = "LocaleImpl_Use_In_Search_Index", order = 5, group = "LocaleImpl_Details", prominent = true, gridOrder = 3) protected Boolean useInSearchIndex = false; @Override public String getLocaleCode() { return localeCode; } @Override public void setLocaleCode(String localeCode) { this.localeCode = localeCode; } @Override public String getFriendlyName() { return friendlyName; } @Override public void setFriendlyName(String friendlyName) { this.friendlyName = friendlyName; } @Override public void setDefaultFlag(Boolean defaultFlag) { this.defaultFlag = defaultFlag; } @Override public Boolean getDefaultFlag() { if (defaultFlag == null) { return Boolean.FALSE; } else { return defaultFlag; } } @Override public BroadleafCurrency getDefaultCurrency() { return defaultCurrency; } @Override public void setDefaultCurrency(BroadleafCurrency defaultCurrency) { this.defaultCurrency = defaultCurrency; } @Override public Boolean getUseInSearchIndex() { return useInSearchIndex == null ? false : useInSearchIndex; } @Override public void setUseInSearchIndex(Boolean useInSearchIndex) { this.useInSearchIndex = useInSearchIndex; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof Locale)) { return false; } LocaleImpl locale = (LocaleImpl) o; if (localeCode != null ? !localeCode.equals(locale.localeCode) : locale.localeCode != null) { return false; } if (friendlyName != null ? !friendlyName.equals(locale.friendlyName) : locale.friendlyName != null) { return false; } return true; } @Override public int hashCode() { int result = localeCode != null ? localeCode.hashCode() : 0; result = 31 * result + (friendlyName != null ? friendlyName.hashCode() : 0); return result; } }
1 (no label)
common_src_main_java_org_broadleafcommerce_common_locale_domain_LocaleImpl.java
688
public class CollectionEventFilter implements EventFilter, IdentifiedDataSerializable { boolean includeValue; public CollectionEventFilter() { } public CollectionEventFilter(boolean includeValue) { this.includeValue = includeValue; } public boolean isIncludeValue() { return includeValue; } @Override public boolean eval(Object arg) { return false; } @Override public void writeData(ObjectDataOutput out) throws IOException { out.writeBoolean(includeValue); } @Override public void readData(ObjectDataInput in) throws IOException { includeValue = in.readBoolean(); } @Override public int getFactoryId() { return CollectionDataSerializerHook.F_ID; } @Override public int getId() { return CollectionDataSerializerHook.COLLECTION_EVENT_FILTER; } }
1 (no label)
hazelcast_src_main_java_com_hazelcast_collection_CollectionEventFilter.java
606
updateSettingsService.updateSettings(clusterStateUpdateRequest, new ClusterStateUpdateListener() { @Override public void onResponse(ClusterStateUpdateResponse response) { listener.onResponse(new UpdateSettingsResponse(response.isAcknowledged())); } @Override public void onFailure(Throwable t) { logger.debug("failed to update settings on indices [{}]", t, request.indices()); listener.onFailure(t); } });
1 (no label)
src_main_java_org_elasticsearch_action_admin_indices_settings_put_TransportUpdateSettingsAction.java
121
public class JMSArchivedPageSubscriber implements MessageListener { @Resource(name = "blPageService") private PageService pageService; /* * (non-Javadoc) * @see javax.jms.MessageListener#onMessage(javax.jms.Message) */ @SuppressWarnings("unchecked") public void onMessage(Message message) { String basePageCacheKey = null; try { basePageCacheKey = ((TextMessage) message).getText(); pageService.removePageFromCache(basePageCacheKey); } catch (JMSException e) { throw new RuntimeException(e); } } }
0 (true)
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_message_jms_JMSArchivedPageSubscriber.java
120
archivePageTemplate.send(archivePageDestination, new MessageCreator() { public Message createMessage(Session session) throws JMSException { return session.createTextMessage(basePageKey); } });
0 (true)
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_page_message_jms_JMSArchivedPagePublisher.java
335
public class LiberalNodeReplaceInsert extends NodeReplaceInsert { protected boolean checkNode(List<Node> usedNodes, Node[] primaryNodes, Node node) { //find matching nodes based on id if (replaceNode(primaryNodes, node, "id", usedNodes)) { return true; } //find matching nodes based on name if (replaceNode(primaryNodes, node, "name", usedNodes)) { return true; } if (replaceNode(primaryNodes, node, "class", usedNodes)) { usedNodes.add(node); return true; } //check if this same node already exists if (exactNodeExists(primaryNodes, node, usedNodes)) { return true; } return false; } }
0 (true)
common_src_main_java_org_broadleafcommerce_common_extensibility_context_merge_handlers_LiberalNodeReplaceInsert.java
3,761
public class LogByteSizeMergePolicyProvider extends AbstractMergePolicyProvider<LogByteSizeMergePolicy> { private final IndexSettingsService indexSettingsService; private volatile ByteSizeValue minMergeSize; private volatile ByteSizeValue maxMergeSize; private volatile int mergeFactor; private volatile int maxMergeDocs; private final boolean calibrateSizeByDeletes; private boolean asyncMerge; private final Set<CustomLogByteSizeMergePolicy> policies = new CopyOnWriteArraySet<CustomLogByteSizeMergePolicy>(); private final ApplySettings applySettings = new ApplySettings(); @Inject public LogByteSizeMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) { super(store); Preconditions.checkNotNull(store, "Store must be provided to merge policy"); this.indexSettingsService = indexSettingsService; this.minMergeSize = componentSettings.getAsBytesSize("min_merge_size", new ByteSizeValue((long) (LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB * 1024 * 1024), ByteSizeUnit.BYTES)); this.maxMergeSize = componentSettings.getAsBytesSize("max_merge_size", new ByteSizeValue((long) LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB, ByteSizeUnit.MB)); this.mergeFactor = componentSettings.getAsInt("merge_factor", LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR); this.maxMergeDocs = componentSettings.getAsInt("max_merge_docs", LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS); this.calibrateSizeByDeletes = componentSettings.getAsBoolean("calibrate_size_by_deletes", true); this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true); logger.debug("using [log_bytes_size] merge policy with merge_factor[{}], min_merge_size[{}], max_merge_size[{}], max_merge_docs[{}], calibrate_size_by_deletes[{}], async_merge[{}]", mergeFactor, minMergeSize, maxMergeSize, maxMergeDocs, calibrateSizeByDeletes, asyncMerge); indexSettingsService.addListener(applySettings); } @Override public LogByteSizeMergePolicy newMergePolicy() { CustomLogByteSizeMergePolicy mergePolicy; if (asyncMerge) { mergePolicy = new EnableMergeLogByteSizeMergePolicy(this); } else { mergePolicy = new CustomLogByteSizeMergePolicy(this); } mergePolicy.setMinMergeMB(minMergeSize.mbFrac()); mergePolicy.setMaxMergeMB(maxMergeSize.mbFrac()); mergePolicy.setMergeFactor(mergeFactor); mergePolicy.setMaxMergeDocs(maxMergeDocs); mergePolicy.setCalibrateSizeByDeletes(calibrateSizeByDeletes); mergePolicy.setNoCFSRatio(noCFSRatio); policies.add(mergePolicy); return mergePolicy; } @Override public void close() throws ElasticsearchException { indexSettingsService.removeListener(applySettings); } public static final String INDEX_MERGE_POLICY_MIN_MERGE_SIZE = "index.merge.policy.min_merge_size"; public static final String INDEX_MERGE_POLICY_MAX_MERGE_SIZE = "index.merge.policy.max_merge_size"; public static final String INDEX_MERGE_POLICY_MAX_MERGE_DOCS = "index.merge.policy.max_merge_docs"; public static final String INDEX_MERGE_POLICY_MERGE_FACTOR = "index.merge.policy.merge_factor"; class ApplySettings implements IndexSettingsService.Listener { @Override public void onRefreshSettings(Settings settings) { ByteSizeValue minMergeSize = settings.getAsBytesSize(INDEX_MERGE_POLICY_MIN_MERGE_SIZE, LogByteSizeMergePolicyProvider.this.minMergeSize); if (!minMergeSize.equals(LogByteSizeMergePolicyProvider.this.minMergeSize)) { logger.info("updating min_merge_size from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.minMergeSize, minMergeSize); LogByteSizeMergePolicyProvider.this.minMergeSize = minMergeSize; for (CustomLogByteSizeMergePolicy policy : policies) { 
policy.setMinMergeMB(minMergeSize.mbFrac()); } } ByteSizeValue maxMergeSize = settings.getAsBytesSize(INDEX_MERGE_POLICY_MAX_MERGE_SIZE, LogByteSizeMergePolicyProvider.this.maxMergeSize); if (!maxMergeSize.equals(LogByteSizeMergePolicyProvider.this.maxMergeSize)) { logger.info("updating max_merge_size from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.maxMergeSize, maxMergeSize); LogByteSizeMergePolicyProvider.this.maxMergeSize = maxMergeSize; for (CustomLogByteSizeMergePolicy policy : policies) { policy.setMaxMergeMB(maxMergeSize.mbFrac()); } } int maxMergeDocs = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogByteSizeMergePolicyProvider.this.maxMergeDocs); if (maxMergeDocs != LogByteSizeMergePolicyProvider.this.maxMergeDocs) { logger.info("updating max_merge_docs from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.maxMergeDocs, maxMergeDocs); LogByteSizeMergePolicyProvider.this.maxMergeDocs = maxMergeDocs; for (CustomLogByteSizeMergePolicy policy : policies) { policy.setMaxMergeDocs(maxMergeDocs); } } int mergeFactor = settings.getAsInt(INDEX_MERGE_POLICY_MERGE_FACTOR, LogByteSizeMergePolicyProvider.this.mergeFactor); if (mergeFactor != LogByteSizeMergePolicyProvider.this.mergeFactor) { logger.info("updating merge_factor from [{}] to [{}]", LogByteSizeMergePolicyProvider.this.mergeFactor, mergeFactor); LogByteSizeMergePolicyProvider.this.mergeFactor = mergeFactor; for (CustomLogByteSizeMergePolicy policy : policies) { policy.setMergeFactor(mergeFactor); } } final double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(LogByteSizeMergePolicyProvider.this.noCFSRatio))); if (noCFSRatio != LogByteSizeMergePolicyProvider.this.noCFSRatio) { logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(LogByteSizeMergePolicyProvider.this.noCFSRatio), formatNoCFSRatio(noCFSRatio)); LogByteSizeMergePolicyProvider.this.noCFSRatio = noCFSRatio; for (CustomLogByteSizeMergePolicy policy : policies) { policy.setNoCFSRatio(noCFSRatio); } } } } public static class CustomLogByteSizeMergePolicy extends LogByteSizeMergePolicy { private final LogByteSizeMergePolicyProvider provider; public CustomLogByteSizeMergePolicy(LogByteSizeMergePolicyProvider provider) { super(); this.provider = provider; } @Override public void close() { super.close(); provider.policies.remove(this); } @Override public MergePolicy clone() { // Lucene IW makes a clone internally but since we hold on to this instance // the clone will just be the identity. return this; } } public static class EnableMergeLogByteSizeMergePolicy extends CustomLogByteSizeMergePolicy { public EnableMergeLogByteSizeMergePolicy(LogByteSizeMergePolicyProvider provider) { super(provider); } @Override public MergeSpecification findMerges(MergeTrigger trigger, SegmentInfos infos) throws IOException { // we don't enable merges while indexing documents, we do them in the background if (trigger == MergeTrigger.SEGMENT_FLUSH) { return null; } return super.findMerges(trigger, infos); } } }
1 (no label)
src_main_java_org_elasticsearch_index_merge_policy_LogByteSizeMergePolicyProvider.java
3,460
public class HazelcastClientBeanDefinitionParser extends AbstractHazelcastBeanDefinitionParser { protected AbstractBeanDefinition parseInternal(Element element, ParserContext parserContext) { final SpringXmlBuilder springXmlBuilder = new SpringXmlBuilder(parserContext); springXmlBuilder.handleClient(element); return springXmlBuilder.getBeanDefinition(); } private class SpringXmlBuilder extends SpringXmlBuilderHelper { private final ParserContext parserContext; private BeanDefinitionBuilder builder; private ManagedMap nearCacheConfigMap;//= new HashMap<String, NearCacheConfig>(); public SpringXmlBuilder(ParserContext parserContext) { this.parserContext = parserContext; this.builder = BeanDefinitionBuilder.rootBeanDefinition(HazelcastClient.class); this.builder.setFactoryMethod("newHazelcastClient"); this.builder.setDestroyMethodName("shutdown"); this.nearCacheConfigMap = new ManagedMap(); this.configBuilder = BeanDefinitionBuilder.rootBeanDefinition(ClientConfig.class); configBuilder.addPropertyValue("nearCacheConfigMap", nearCacheConfigMap); BeanDefinitionBuilder managedContextBeanBuilder = createBeanBuilder(SpringManagedContext.class); this.configBuilder.addPropertyValue("managedContext", managedContextBeanBuilder.getBeanDefinition()); } public AbstractBeanDefinition getBeanDefinition() { return builder.getBeanDefinition(); } public void handleClient(Element element) { handleCommonBeanAttributes(element, builder, parserContext); final NamedNodeMap attrs = element.getAttributes(); if (attrs != null) { for (int a = 0; a < attrs.getLength(); a++) { final org.w3c.dom.Node att = attrs.item(a); final String name = att.getNodeName(); final String value = att.getNodeValue(); if ("executor-pool-size".equals(name)) { configBuilder.addPropertyValue("executorPoolSize", value); } else if ("credentials-ref".equals(name)) { configBuilder.addPropertyReference("credentials", value); } } } for (org.w3c.dom.Node node : new IterableNodeList(element, Node.ELEMENT_NODE)) { final String nodeName = cleanNodeName(node.getNodeName()); if ("group".equals(nodeName)) { createAndFillBeanBuilder(node, GroupConfig.class, "groupConfig", configBuilder); } else if ("properties".equals(nodeName)) { handleProperties(node, configBuilder); } else if ("network".equals(nodeName)) { handleNetwork(node); } else if ("listeners".equals(nodeName)) { final List listeners = parseListeners(node, ListenerConfig.class); configBuilder.addPropertyValue("listenerConfigs", listeners); } else if ("serialization".equals(nodeName)) { handleSerialization(node); } else if ("proxy-factories".equals(nodeName)) { final List list = parseProxyFactories(node, ProxyFactoryConfig.class); configBuilder.addPropertyValue("proxyFactoryConfigs", list); } else if ("load-balancer".equals(nodeName)) { handleLoadBalancer(node); } else if ("near-cache".equals(nodeName)) { handleNearCache(node); } } builder.addConstructorArgValue(configBuilder.getBeanDefinition()); } private void handleNetwork(Node node) { List<String> members = new ArrayList<String>(10); fillAttributeValues(node, configBuilder); for (org.w3c.dom.Node child : new IterableNodeList(node, Node.ELEMENT_NODE)) { final String nodeName = cleanNodeName(child); if ("member".equals(nodeName)) { members.add(getTextContent(child)); } else if ("socket-options".equals(nodeName)) { createAndFillBeanBuilder(child, SocketOptions.class, "socketOptions", configBuilder); } else if ("socket-interceptor".equals(nodeName)) { handleSocketInterceptorConfig(node, configBuilder); } else if ("ssl".equals(nodeName)) { 
handleSSLConfig(node, configBuilder); } } configBuilder.addPropertyValue("addresses", members); } private void handleSSLConfig(final Node node, final BeanDefinitionBuilder networkConfigBuilder) { BeanDefinitionBuilder sslConfigBuilder = createBeanBuilder(SSLConfig.class); final String implAttribute = "factory-implementation"; fillAttributeValues(node, sslConfigBuilder, implAttribute); Node implNode = node.getAttributes().getNamedItem(implAttribute); String implementation = implNode != null ? getTextContent(implNode) : null; if (implementation != null) { sslConfigBuilder.addPropertyReference(xmlToJavaName(implAttribute), implementation); } for (org.w3c.dom.Node child : new IterableNodeList(node, Node.ELEMENT_NODE)) { final String name = cleanNodeName(child); if ("properties".equals(name)) { handleProperties(child, sslConfigBuilder); } } networkConfigBuilder.addPropertyValue("SSLConfig", sslConfigBuilder.getBeanDefinition()); } private void handleLoadBalancer(Node node) { final String type = getAttribute(node, "type"); if ("random".equals(type)) { configBuilder.addPropertyValue("loadBalancer", new RandomLB()); } else if ("round-robin".equals(type)) { configBuilder.addPropertyValue("loadBalancer", new RoundRobinLB()); } } private void handleNearCache(Node node) { createAndFillListedBean(node, NearCacheConfig.class, "name", nearCacheConfigMap, "name"); } } }
1 (no label)
hazelcast-spring_src_main_java_com_hazelcast_spring_HazelcastClientBeanDefinitionParser.java
226
private static final class SimulateLoadTask implements Callable, Serializable, HazelcastInstanceAware { private static final long serialVersionUID = 1; private final int delay; private final int taskId; private final String latchId; private transient HazelcastInstance hz; private SimulateLoadTask(int delay, int taskId, String latchId) { this.delay = delay; this.taskId = taskId; this.latchId = latchId; } @Override public void setHazelcastInstance(HazelcastInstance hazelcastInstance) { this.hz = hazelcastInstance; } @Override public Object call() throws Exception { try { Thread.sleep(delay * ONE_THOUSAND); } catch (InterruptedException e) { throw new RuntimeException(e); } hz.getCountDownLatch(latchId).countDown(); System.out.println("Finished task:" + taskId); return null; } }
0 (true)
hazelcast-client_src_main_java_com_hazelcast_client_examples_ClientTestApp.java
1,036
@SuppressWarnings("unchecked") public class OCommandExecutorSQLDropClass extends OCommandExecutorSQLAbstract implements OCommandDistributedReplicateRequest { public static final String KEYWORD_DROP = "DROP"; public static final String KEYWORD_CLASS = "CLASS"; private String className; public OCommandExecutorSQLDropClass parse(final OCommandRequest iRequest) { init((OCommandRequestText) iRequest); final StringBuilder word = new StringBuilder(); int oldPos = 0; int pos = nextWord(parserText, parserTextUpperCase, oldPos, word, true); if (pos == -1 || !word.toString().equals(KEYWORD_DROP)) throw new OCommandSQLParsingException("Keyword " + KEYWORD_DROP + " not found. Use " + getSyntax(), parserText, oldPos); pos = nextWord(parserText, parserTextUpperCase, pos, word, true); if (pos == -1 || !word.toString().equals(KEYWORD_CLASS)) throw new OCommandSQLParsingException("Keyword " + KEYWORD_CLASS + " not found. Use " + getSyntax(), parserText, oldPos); pos = nextWord(parserText, parserTextUpperCase, pos, word, false); if (pos == -1) throw new OCommandSQLParsingException("Expected <class>. Use " + getSyntax(), parserText, pos); className = word.toString(); return this; } /** * Execute the DROP CLASS. */ public Object execute(final Map<Object, Object> iArgs) { if (className == null) throw new OCommandExecutionException("Cannot execute the command because it has not been parsed yet"); final ODatabaseRecord database = getDatabase(); final OClass oClass = database.getMetadata().getSchema().getClass(className); if (oClass == null) return null; for (final OIndex<?> oIndex : oClass.getClassIndexes()) { database.getMetadata().getIndexManager().dropIndex(oIndex.getName()); } final OClass superClass = oClass.getSuperClass(); final int[] clustersToIndex = oClass.getPolymorphicClusterIds(); final String[] clusterNames = new String[clustersToIndex.length]; for (int i = 0; i < clustersToIndex.length; i++) { clusterNames[i] = database.getClusterNameById(clustersToIndex[i]); } final int clusterId = oClass.getDefaultClusterId(); ((OSchemaProxy) database.getMetadata().getSchema()).dropClassInternal(className); ((OSchemaProxy) database.getMetadata().getSchema()).saveInternal(); database.getMetadata().getSchema().reload(); deleteDefaultCluster(clusterId); if (superClass == null) return true; for (final OIndex<?> oIndex : superClass.getIndexes()) { for (final String clusterName : clusterNames) oIndex.getInternal().removeCluster(clusterName); OLogManager.instance() .info(this, "Index %s is used in super class of %s and should be rebuilt.", oIndex.getName(), className); oIndex.rebuild(); } return true; } protected void deleteDefaultCluster(int clusterId) { final ODatabaseRecord database = getDatabase(); OCluster cluster = database.getStorage().getClusterById(clusterId); if (cluster.getName().equalsIgnoreCase(className)) { if (isClusterDeletable(clusterId)) { database.getStorage().dropCluster(clusterId, true); } } } protected boolean isClusterDeletable(int clusterId) { final ODatabaseRecord database = getDatabase(); for (OClass iClass : database.getMetadata().getSchema().getClasses()) { for (int i : iClass.getClusterIds()) { if (i == clusterId) return false; } } return true; } @Override public String getSyntax() { return "DROP CLASS <class>"; } }
1 (no label)
core_src_main_java_com_orientechnologies_orient_core_sql_OCommandExecutorSQLDropClass.java
4,963
public class RestUtils { public static PathTrie.Decoder REST_DECODER = new PathTrie.Decoder() { @Override public String decode(String value) { return RestUtils.decodeComponent(value); } }; public static boolean isBrowser(@Nullable String userAgent) { if (userAgent == null) { return false; } // chrome, safari, firefox, ie if (userAgent.startsWith("Mozilla")) { return true; } return false; } public static void decodeQueryString(String s, int fromIndex, Map<String, String> params) { if (fromIndex < 0) { return; } if (fromIndex >= s.length()) { return; } String name = null; int pos = fromIndex; // Beginning of the unprocessed region int i; // End of the unprocessed region char c = 0; // Current character for (i = fromIndex; i < s.length(); i++) { c = s.charAt(i); if (c == '=' && name == null) { if (pos != i) { name = decodeComponent(s.substring(pos, i)); } pos = i + 1; } else if (c == '&') { if (name == null && pos != i) { // We haven't seen an `=' so far but moved forward. // Must be a param of the form '&a&' so add it with // an empty value. addParam(params, decodeComponent(s.substring(pos, i)), ""); } else if (name != null) { addParam(params, name, decodeComponent(s.substring(pos, i))); name = null; } pos = i + 1; } } if (pos != i) { // Are there characters we haven't dealt with? if (name == null) { // Yes and we haven't seen any `='. addParam(params, decodeComponent(s.substring(pos, i)), ""); } else { // Yes and this must be the last value. addParam(params, name, decodeComponent(s.substring(pos, i))); } } else if (name != null) { // Have we seen a name without value? addParam(params, name, ""); } } private static void addParam(Map<String, String> params, String name, String value) { params.put(name, value); } /** * Decodes a bit of an URL encoded by a browser. * <p/> * This is equivalent to calling {@link #decodeComponent(String, Charset)} * with the UTF-8 charset (recommended to comply with RFC 3986, Section 2). * * @param s The string to decode (can be empty). * @return The decoded string, or {@code s} if there's nothing to decode. * If the string to decode is {@code null}, returns an empty string. * @throws IllegalArgumentException if the string contains a malformed * escape sequence. */ public static String decodeComponent(final String s) { return decodeComponent(s, Charsets.UTF_8); } /** * Decodes a bit of an URL encoded by a browser. * <p/> * The string is expected to be encoded as per RFC 3986, Section 2. * This is the encoding used by JavaScript functions {@code encodeURI} * and {@code encodeURIComponent}, but not {@code escape}. For example * in this encoding, &eacute; (in Unicode {@code U+00E9} or in UTF-8 * {@code 0xC3 0xA9}) is encoded as {@code %C3%A9} or {@code %c3%a9}. * <p/> * This is essentially equivalent to calling * <code>{@link java.net.URLDecoder URLDecoder}.{@link * java.net.URLDecoder#decode(String, String)}</code> * except that it's over 2x faster and generates less garbage for the GC. * Actually this function doesn't allocate any memory if there's nothing * to decode, the argument itself is returned. * * @param s The string to decode (can be empty). * @param charset The charset to use to decode the string (should really * be {@link Charsets#UTF_8}. * @return The decoded string, or {@code s} if there's nothing to decode. * If the string to decode is {@code null}, returns an empty string. * @throws IllegalArgumentException if the string contains a malformed * escape sequence. 
*/ @SuppressWarnings("fallthrough") public static String decodeComponent(final String s, final Charset charset) { if (s == null) { return ""; } final int size = s.length(); boolean modified = false; for (int i = 0; i < size; i++) { final char c = s.charAt(i); switch (c) { case '%': i++; // We can skip at least one char, e.g. `%%'. // Fall through. case '+': modified = true; break; } } if (!modified) { return s; } final byte[] buf = new byte[size]; int pos = 0; // position in `buf'. for (int i = 0; i < size; i++) { char c = s.charAt(i); switch (c) { case '+': buf[pos++] = ' '; // "+" -> " " break; case '%': if (i == size - 1) { throw new IllegalArgumentException("unterminated escape" + " sequence at end of string: " + s); } c = s.charAt(++i); if (c == '%') { buf[pos++] = '%'; // "%%" -> "%" break; } else if (i == size - 1) { throw new IllegalArgumentException("partial escape" + " sequence at end of string: " + s); } c = decodeHexNibble(c); final char c2 = decodeHexNibble(s.charAt(++i)); if (c == Character.MAX_VALUE || c2 == Character.MAX_VALUE) { throw new IllegalArgumentException( "invalid escape sequence `%" + s.charAt(i - 1) + s.charAt(i) + "' at index " + (i - 2) + " of: " + s); } c = (char) (c * 16 + c2); // Fall through. default: buf[pos++] = (byte) c; break; } } return new String(buf, 0, pos, charset); } /** * Helper to decode half of a hexadecimal number from a string. * * @param c The ASCII character of the hexadecimal number to decode. * Must be in the range {@code [0-9a-fA-F]}. * @return The hexadecimal value represented in the ASCII character * given, or {@link Character#MAX_VALUE} if the character is invalid. */ private static char decodeHexNibble(final char c) { if ('0' <= c && c <= '9') { return (char) (c - '0'); } else if ('a' <= c && c <= 'f') { return (char) (c - 'a' + 10); } else if ('A' <= c && c <= 'F') { return (char) (c - 'A' + 10); } else { return Character.MAX_VALUE; } } }
1 (no label)
src_main_java_org_elasticsearch_rest_support_RestUtils.java
3,742
node.nodeEngine.getExecutionService().execute("hz:wan", new Runnable() { @Override public void run() { final Data data = packet.getData(); try { WanReplicationEvent replicationEvent = (WanReplicationEvent) node.nodeEngine.toObject(data); String serviceName = replicationEvent.getServiceName(); ReplicationSupportingService service = node.nodeEngine.getService(serviceName); service.onReplicationEvent(replicationEvent); } catch (Exception e) { logger.severe(e); } } });
1 (no label)
hazelcast_src_main_java_com_hazelcast_wan_impl_WanReplicationServiceImpl.java
55
public class OExclusiveLock extends OAbstractLock { private final ReadWriteLock lock; public OExclusiveLock(final ReadWriteLock iLock) { lock = iLock; } public void lock() { lock.writeLock().lock(); } public void unlock() { lock.writeLock().unlock(); } }
0 (true)
commons_src_main_java_com_orientechnologies_common_concur_lock_OExclusiveLock.java
277
@RunWith(HazelcastSerialClassRunner.class) @Category(SlowTest.class) public class ClientExecutionPoolSizeLowTest { static final int COUNT = 1000; static HazelcastInstance server1; static HazelcastInstance server2; static HazelcastInstance client; static IMap map; @Before public void init() { Config config = new Config(); server1 = Hazelcast.newHazelcastInstance(config); ClientConfig clientConfig = new ClientConfig(); clientConfig.setExecutorPoolSize(1); clientConfig.getNetworkConfig().setRedoOperation(true); client = HazelcastClient.newHazelcastClient(clientConfig); server2 = Hazelcast.newHazelcastInstance(config); map = client.getMap(randomString()); } @After public void destroy() { HazelcastClient.shutdownAll(); Hazelcast.shutdownAll(); } @Test public void testNodeTerminate() throws InterruptedException, ExecutionException { for (int i = 0; i < COUNT; i++) { map.put(i, i); if (i == COUNT / 2) { server2.getLifecycleService().terminate(); } } assertEquals(COUNT, map.size()); } @Test public void testOwnerNodeTerminate() throws InterruptedException, ExecutionException { for (int i = 0; i < COUNT; i++) { map.put(i, i); if (i == COUNT / 2) { server1.getLifecycleService().terminate(); } } assertEquals(COUNT, map.size()); } @Test public void testNodeTerminateWithAsyncOperations() throws InterruptedException, ExecutionException { for (int i = 0; i < COUNT; i++) { map.putAsync(i, i); if (i == COUNT / 2) { server2.getLifecycleService().terminate(); } } assertTrueEventually(new AssertTask() { @Override public void run() throws Exception { assertEquals(COUNT, map.size()); } }); } @Test public void testOwnerNodeTerminateWithAsyncOperations() throws InterruptedException, ExecutionException { for (int i = 0; i < COUNT; i++) { map.putAsync(i, i); if (i == COUNT / 2) { server1.getLifecycleService().terminate(); } } assertTrueEventually(new AssertTask() { @Override public void run() throws Exception { assertEquals(COUNT, map.size()); } }); } }
0 (true)
hazelcast-client_src_test_java_com_hazelcast_client_io_ClientExecutionPoolSizeLowTest.java
1,700
public class HashedBytesArray implements BytesReference { private final byte[] bytes; /** * Cache the hash code for the string */ private int hash; // Defaults to 0 public HashedBytesArray(byte[] bytes) { this.bytes = bytes; } @Override public byte get(int index) { return bytes[index]; } @Override public int length() { return bytes.length; } @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > bytes.length) { throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + bytes.length + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new BytesArray(bytes, from, length); } @Override public StreamInput streamInput() { return new BytesStreamInput(bytes, false); } @Override public void writeTo(OutputStream os) throws IOException { os.write(bytes); } @Override public byte[] toBytes() { return bytes; } @Override public BytesArray toBytesArray() { return new BytesArray(bytes); } @Override public BytesArray copyBytesArray() { byte[] copy = new byte[bytes.length]; System.arraycopy(bytes, 0, copy, 0, bytes.length); return new BytesArray(copy); } @Override public ChannelBuffer toChannelBuffer() { return ChannelBuffers.wrappedBuffer(bytes, 0, bytes.length); } @Override public boolean hasArray() { return true; } @Override public byte[] array() { return bytes; } @Override public int arrayOffset() { return 0; } @Override public String toUtf8() { if (bytes.length == 0) { return ""; } return new String(bytes, Charsets.UTF_8); } @Override public BytesRef toBytesRef() { return new BytesRef(bytes); } @Override public BytesRef copyBytesRef() { byte[] copy = new byte[bytes.length]; System.arraycopy(bytes, 0, copy, 0, bytes.length); return new BytesRef(copy); } @Override public int hashCode() { if (hash == 0) { hash = Helper.bytesHashCode(this); } return hash; } @Override public boolean equals(Object obj) { return Helper.bytesEqual(this, (BytesReference) obj); } }
1 (no label)
src_main_java_org_elasticsearch_common_bytes_HashedBytesArray.java
243
@Repository("blCurrencyDao") public class BroadleafCurrencyDaoImpl implements BroadleafCurrencyDao { @PersistenceContext(unitName = "blPU") protected EntityManager em; @Resource(name="blEntityConfiguration") protected EntityConfiguration entityConfiguration; @Override public BroadleafCurrency findDefaultBroadleafCurrency() { Query query = em.createNamedQuery("BC_READ_DEFAULT_CURRENCY"); query.setHint(org.hibernate.ejb.QueryHints.HINT_CACHEABLE, true); List<BroadleafCurrency> currencyList = (List<BroadleafCurrency>) query.getResultList(); if (currencyList.size() >= 1) { return currencyList.get(0); } return null; } /** * @return The locale for the passed in code */ @Override public BroadleafCurrency findCurrencyByCode(String currencyCode) { Query query = em.createNamedQuery("BC_READ_CURRENCY_BY_CODE"); query.setParameter("currencyCode", currencyCode); query.setHint(org.hibernate.ejb.QueryHints.HINT_CACHEABLE, true); List<BroadleafCurrency> currencyList = (List<BroadleafCurrency>) query.getResultList(); if (currencyList.size() >= 1) { return currencyList.get(0); } return null; } @Override public List<BroadleafCurrency> getAllCurrencies() { Query query = em.createNamedQuery("BC_READ_ALL_CURRENCIES"); query.setHint(org.hibernate.ejb.QueryHints.HINT_CACHEABLE, true); return (List<BroadleafCurrency>) query.getResultList(); } @Override public BroadleafCurrency save(BroadleafCurrency currency) { return em.merge(currency); } }
0 (true)
common_src_main_java_org_broadleafcommerce_common_currency_dao_BroadleafCurrencyDaoImpl.java
29
{ @Override public int defaultPort() { return 5001; } @Override public int port() { return config.getAddress().getPort(); } }, receiver, logging);
1 (no label)
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java
7
private static class HBasePidfileParseException extends Exception { private static final long serialVersionUID = 1L; public HBasePidfileParseException(String message) { super(message); } }
0 (true)
titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_HBaseStatus.java
2,051
public class EvictOperation extends LockAwareOperation implements BackupAwareOperation { boolean evicted = false; boolean asyncBackup = false; public EvictOperation(String name, Data dataKey, boolean asyncBackup) { super(name, dataKey); this.asyncBackup = asyncBackup; } public EvictOperation() { } public void run() { dataValue = mapService.toData(recordStore.evict(dataKey)); evicted = dataValue != null; } @Override public Object getResponse() { return evicted; } @Override public void onWaitExpire() { getResponseHandler().sendResponse(false); } public Operation getBackupOperation() { return new RemoveBackupOperation(name, dataKey); } public int getAsyncBackupCount() { if (asyncBackup) { return mapService.getMapContainer(name).getTotalBackupCount(); } else { return mapService.getMapContainer(name).getAsyncBackupCount(); } } public int getSyncBackupCount() { if (asyncBackup) { return 0; } else { return mapService.getMapContainer(name).getBackupCount(); } } public boolean shouldBackup() { return evicted; } public void afterRun() { if (evicted) { mapService.interceptAfterRemove(name, dataValue); EntryEventType eventType = EntryEventType.EVICTED; mapService.publishEvent(getCallerAddress(), name, eventType, dataKey, dataValue, null); invalidateNearCaches(); } } @Override protected void writeInternal(ObjectDataOutput out) throws IOException { super.writeInternal(out); out.writeBoolean(asyncBackup); } @Override protected void readInternal(ObjectDataInput in) throws IOException { super.readInternal(in); asyncBackup = in.readBoolean(); } @Override public String toString() { return "EvictOperation{" + name + "}"; } }
1 (no label)
hazelcast_src_main_java_com_hazelcast_map_operation_EvictOperation.java
87
NOT_IN { @Override public boolean evaluate(Object value, Object condition) { Preconditions.checkArgument(isValidCondition(condition), "Invalid condition provided: %s", condition); Collection col = (Collection) condition; return !col.contains(value); } @Override public TitanPredicate negate() { return IN; } };
0 (true)
titan-core_src_main_java_com_thinkaurelius_titan_core_attribute_Contain.java
73
public interface ImageStaticAsset extends StaticAsset { public Integer getWidth(); public void setWidth(Integer width); public Integer getHeight(); public void setHeight(Integer height); }
0 (true)
admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_ImageStaticAsset.java
1,307
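As a hedged illustration of the two properties the ImageStaticAsset interface adds on top of StaticAsset, a hypothetical consumer might read them like this; the helper class and method names are assumptions, and the import path is inferred from the file path above.

import org.broadleafcommerce.cms.file.domain.ImageStaticAsset;

// Hypothetical helper: only the width/height accessors declared by
// ImageStaticAsset are used; the asset instance is assumed to be loaded elsewhere.
public class ImageDimensionPrinter {

    public void printDimensions(ImageStaticAsset asset) {
        Integer width = asset.getWidth();
        Integer height = asset.getHeight();
        if (width != null && height != null) {
            System.out.println("Image is " + width + "x" + height + " pixels");
        }
    }
}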
public class OClusterPositionMap extends OSharedResourceAdaptive { public static final String DEF_EXTENSION = ".cpm"; private final ODiskCache diskCache; private String name; private long fileId; private final OWriteAheadLog writeAheadLog; public OClusterPositionMap(ODiskCache diskCache, String name, OWriteAheadLog writeAheadLog) { acquireExclusiveLock(); try { this.diskCache = diskCache; this.name = name; this.writeAheadLog = writeAheadLog; } finally { releaseExclusiveLock(); } } public void open() throws IOException { acquireExclusiveLock(); try { fileId = diskCache.openFile(name + DEF_EXTENSION); } finally { releaseExclusiveLock(); } } public void create() throws IOException { acquireExclusiveLock(); try { fileId = diskCache.openFile(name + DEF_EXTENSION); } finally { releaseExclusiveLock(); } } public void flush() throws IOException { acquireSharedLock(); try { diskCache.flushFile(fileId); } finally { releaseSharedLock(); } } public void close() throws IOException { acquireExclusiveLock(); try { diskCache.closeFile(fileId); } finally { releaseExclusiveLock(); } } public void truncate() throws IOException { acquireExclusiveLock(); try { diskCache.truncateFile(fileId); } finally { releaseExclusiveLock(); } } public void delete() throws IOException { acquireExclusiveLock(); try { diskCache.deleteFile(fileId); } finally { releaseExclusiveLock(); } } public void rename(String newName) throws IOException { acquireExclusiveLock(); try { diskCache.renameFile(fileId, this.name, newName); name = newName; } finally { releaseExclusiveLock(); } } public OClusterPosition add(long pageIndex, int recordPosition, OOperationUnitId unitId, OLogSequenceNumber startLSN, ODurablePage.TrackMode trackMode) throws IOException { acquireExclusiveLock(); try { long lastPage = diskCache.getFilledUpTo(fileId) - 1; boolean isNewPage = false; if (lastPage < 0) { lastPage = 0; isNewPage = true; } OCacheEntry cacheEntry = diskCache.load(fileId, lastPage, false); OCachePointer cachePointer = cacheEntry.getCachePointer(); cachePointer.acquireExclusiveLock(); try { OClusterPositionMapBucket bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), trackMode); if (bucket.isFull()) { cachePointer.releaseExclusiveLock(); diskCache.release(cacheEntry); isNewPage = true; cacheEntry = diskCache.allocateNewPage(fileId); cachePointer = cacheEntry.getCachePointer(); cachePointer.acquireExclusiveLock(); bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), trackMode); } final long index = bucket.add(pageIndex, recordPosition); final OClusterPosition result = OClusterPositionFactory.INSTANCE.valueOf(index + cacheEntry.getPageIndex() * OClusterPositionMapBucket.MAX_ENTRIES); logPageChanges(bucket, fileId, cacheEntry.getPageIndex(), isNewPage, unitId, startLSN); cacheEntry.markDirty(); return result; } finally { cachePointer.releaseExclusiveLock(); diskCache.release(cacheEntry); } } finally { releaseExclusiveLock(); } } public OClusterPositionMapBucket.PositionEntry get(OClusterPosition clusterPosition) throws IOException { acquireSharedLock(); try { final long position = clusterPosition.longValue(); long pageIndex = position / OClusterPositionMapBucket.MAX_ENTRIES; int index = (int) (position % OClusterPositionMapBucket.MAX_ENTRIES); final OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false); final OCachePointer cachePointer = cacheEntry.getCachePointer(); try { final OClusterPositionMapBucket bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE); 
return bucket.get(index); } finally { diskCache.release(cacheEntry); } } finally { releaseSharedLock(); } } public OClusterPositionMapBucket.PositionEntry remove(OClusterPosition clusterPosition, OOperationUnitId unitId, OLogSequenceNumber startLSN, ODurablePage.TrackMode trackMode) throws IOException { acquireExclusiveLock(); try { final long position = clusterPosition.longValue(); long pageIndex = position / OClusterPositionMapBucket.MAX_ENTRIES; int index = (int) (position % OClusterPositionMapBucket.MAX_ENTRIES); final OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false); final OCachePointer cachePointer = cacheEntry.getCachePointer(); cachePointer.acquireExclusiveLock(); try { final OClusterPositionMapBucket bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), trackMode); OClusterPositionMapBucket.PositionEntry positionEntry = bucket.remove(index); if (positionEntry == null) return null; cacheEntry.markDirty(); logPageChanges(bucket, fileId, pageIndex, false, unitId, startLSN); return positionEntry; } finally { cachePointer.releaseExclusiveLock(); diskCache.release(cacheEntry); } } finally { releaseExclusiveLock(); } } public OClusterPosition[] higherPositions(OClusterPosition clusterPosition) throws IOException { acquireSharedLock(); try { final long position = clusterPosition.longValue(); if (position == Long.MAX_VALUE) return new OClusterPosition[0]; return ceilingPositions(OClusterPositionFactory.INSTANCE.valueOf(position + 1)); } finally { releaseSharedLock(); } } public OClusterPosition[] ceilingPositions(OClusterPosition clusterPosition) throws IOException { acquireSharedLock(); try { long position = clusterPosition.longValue(); if (position < 0) position = 0; long pageIndex = position / OClusterPositionMapBucket.MAX_ENTRIES; int index = (int) (position % OClusterPositionMapBucket.MAX_ENTRIES); final long filledUpTo = diskCache.getFilledUpTo(fileId); if (pageIndex >= filledUpTo) return new OClusterPosition[0]; OClusterPosition[] result = null; do { OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false); OCachePointer cachePointer = cacheEntry.getCachePointer(); OClusterPositionMapBucket bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE); int resultSize = bucket.getSize() - index; if (resultSize <= 0) { diskCache.release(cacheEntry); pageIndex++; } else { int entriesCount = 0; long startIndex = cacheEntry.getPageIndex() * OClusterPositionMapBucket.MAX_ENTRIES + index; result = new OClusterPosition[resultSize]; for (int i = 0; i < resultSize; i++) { if (bucket.exists(i + index)) { result[entriesCount] = OClusterPositionFactory.INSTANCE.valueOf(startIndex + i); entriesCount++; } } if (entriesCount == 0) { result = null; pageIndex++; index = 0; } else result = Arrays.copyOf(result, entriesCount); diskCache.release(cacheEntry); } } while (result == null && pageIndex < filledUpTo); if (result == null) result = new OClusterPosition[0]; return result; } finally { releaseSharedLock(); } } public OClusterPosition[] lowerPositions(OClusterPosition clusterPosition) throws IOException { acquireSharedLock(); try { final long position = clusterPosition.longValue(); if (position == 0) return new OClusterPosition[0]; return floorPositions(OClusterPositionFactory.INSTANCE.valueOf(position - 1)); } finally { releaseSharedLock(); } } public OClusterPosition[] floorPositions(OClusterPosition clusterPosition) throws IOException { acquireSharedLock(); try { final long position = clusterPosition.longValue(); if 
(position < 0) return new OClusterPosition[0]; long pageIndex = position / OClusterPositionMapBucket.MAX_ENTRIES; int index = (int) (position % OClusterPositionMapBucket.MAX_ENTRIES); final long filledUpTo = diskCache.getFilledUpTo(fileId); OClusterPosition[] result; if (pageIndex >= filledUpTo) { pageIndex = filledUpTo - 1; index = Integer.MIN_VALUE; } do { OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false); OCachePointer cachePointer = cacheEntry.getCachePointer(); OClusterPositionMapBucket bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE); if (index == Integer.MIN_VALUE) index = bucket.getSize() - 1; int resultSize = index + 1; int entriesCount = 0; long startPosition = cacheEntry.getPageIndex() * OClusterPositionMapBucket.MAX_ENTRIES; result = new OClusterPosition[resultSize]; for (int i = 0; i < resultSize; i++) { if (bucket.exists(i)) { result[entriesCount] = OClusterPositionFactory.INSTANCE.valueOf(startPosition + i); entriesCount++; } } if (entriesCount == 0) { result = null; pageIndex--; index = Integer.MIN_VALUE; } else result = Arrays.copyOf(result, entriesCount); diskCache.release(cacheEntry); } while (result == null && pageIndex >= 0); if (result == null) result = new OClusterPosition[0]; return result; } finally { releaseSharedLock(); } } public OClusterPosition getFirstPosition() throws IOException { acquireSharedLock(); try { final long filledUpTo = diskCache.getFilledUpTo(fileId); for (long pageIndex = 0; pageIndex < filledUpTo; pageIndex++) { OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false); OCachePointer cachePointer = cacheEntry.getCachePointer(); try { OClusterPositionMapBucket bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE); int bucketSize = bucket.getSize(); for (int index = 0; index < bucketSize; index++) { if (bucket.exists(index)) return OClusterPositionFactory.INSTANCE.valueOf(pageIndex * OClusterPositionMapBucket.MAX_ENTRIES + index); } } finally { diskCache.release(cacheEntry); } } return OClusterPosition.INVALID_POSITION; } finally { releaseSharedLock(); } } public OClusterPosition getLastPosition() throws IOException { acquireSharedLock(); try { final long filledUpTo = diskCache.getFilledUpTo(fileId); for (long pageIndex = filledUpTo - 1; pageIndex >= 0; pageIndex--) { OCacheEntry cacheEntry = diskCache.load(fileId, pageIndex, false); OCachePointer cachePointer = cacheEntry.getCachePointer(); try { OClusterPositionMapBucket bucket = new OClusterPositionMapBucket(cachePointer.getDataPointer(), ODurablePage.TrackMode.NONE); final int bucketSize = bucket.getSize(); for (int index = bucketSize - 1; index >= 0; index--) { if (bucket.exists(index)) return OClusterPositionFactory.INSTANCE.valueOf(pageIndex * OClusterPositionMapBucket.MAX_ENTRIES + index); } } finally { diskCache.release(cacheEntry); } } return OClusterPosition.INVALID_POSITION; } finally { releaseSharedLock(); } } public boolean wasSoftlyClosed() throws IOException { acquireSharedLock(); try { return diskCache.wasSoftlyClosed(fileId); } finally { releaseSharedLock(); } } private void logPageChanges(ODurablePage localPage, long fileId, long pageIndex, boolean isNewPage, OOperationUnitId unitId, OLogSequenceNumber startLSN) throws IOException { if (writeAheadLog != null) { OPageChanges pageChanges = localPage.getPageChanges(); if (pageChanges.isEmpty()) return; OLogSequenceNumber prevLsn; if (isNewPage) prevLsn = startLSN; else prevLsn = localPage.getLsn(); 
OLogSequenceNumber lsn = writeAheadLog.log(new OUpdatePageRecord(pageIndex, fileId, unitId, pageChanges, prevLsn)); localPage.setLsn(lsn); } } }
1no label
core_src_main_java_com_orientechnologies_orient_core_storage_impl_local_paginated_OClusterPositionMap.java
4,662
private final PercolatorType countPercolator = new PercolatorType() {

    @Override
    public byte id() {
        return 0x01;
    }

    @Override
    public ReduceResult reduce(List<PercolateShardResponse> shardResults) {
        long finalCount = 0;
        for (PercolateShardResponse shardResponse : shardResults) {
            finalCount += shardResponse.count();
        }
        assert !shardResults.isEmpty();
        InternalFacets reducedFacets = reduceFacets(shardResults);
        InternalAggregations reducedAggregations = reduceAggregations(shardResults);
        return new ReduceResult(finalCount, reducedFacets, reducedAggregations);
    }

    @Override
    public PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context) {
        long count = 0;
        Lucene.ExistsCollector collector = new Lucene.ExistsCollector();
        for (Map.Entry<HashedBytesRef, Query> entry : context.percolateQueries().entrySet()) {
            collector.reset();
            try {
                context.docSearcher().search(entry.getValue(), collector);
            } catch (IOException e) {
                logger.warn("[" + entry.getKey() + "] failed to execute query", e);
            }
            if (collector.exists()) {
                count++;
            }
        }
        return new PercolateShardResponse(count, context, request.index(), request.shardId());
    }
};
1no label
src_main_java_org_elasticsearch_percolator_PercolatorService.java