Unnamed: 0 (int64, 0-6.45k) | func (string, lengths 29-253k) | target (class label, 2 classes) | project (string, lengths 36-167)
---|---|---|---|
1,105 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class SemaphoreConfigTest {
@Test
public void testSetInitialPermits() {
SemaphoreConfig semaphoreConfig = new SemaphoreConfig().setInitialPermits(1234);
assertTrue(semaphoreConfig.getInitialPermits() == 1234);
}
@Test
public void shouldAcceptZeroInitialPermits() {
SemaphoreConfig semaphoreConfig = new SemaphoreConfig().setInitialPermits(0);
assertTrue(semaphoreConfig.getInitialPermits() == 0);
}
@Test
public void shouldAcceptNegativeInitialPermits() {
SemaphoreConfig semaphoreConfig = new SemaphoreConfig().setInitialPermits(-1234);
assertTrue(semaphoreConfig.getInitialPermits() == -1234);
}
} | 0 (true)
| hazelcast_src_test_java_com_hazelcast_config_SemaphoreConfigTest.java |
3,136 | public class InternalEngineModule extends AbstractModule {
@Override
protected void configure() {
bind(Engine.class).to(InternalEngine.class).asEagerSingleton();
}
} | 0 (true)
| src_main_java_org_elasticsearch_index_engine_internal_InternalEngineModule.java |
250 | fCollapseComments= new FoldingAction(getResourceBundle(), "Projection.CollapseComments.") {
public void run() {
if (editor instanceof CeylonEditor) {
ProjectionAnnotationModel pam = ((CeylonEditor) editor).getCeylonSourceViewer()
.getProjectionAnnotationModel();
for (@SuppressWarnings("unchecked")
Iterator<ProjectionAnnotation> iter=pam.getAnnotationIterator(); iter.hasNext();) {
ProjectionAnnotation pa = iter.next();
if (pa instanceof CeylonProjectionAnnotation) {
int tt = ((CeylonProjectionAnnotation) pa).getTokenType();
if (tt==CeylonLexer.MULTI_COMMENT ||
tt==CeylonLexer.LINE_COMMENT) {
pam.collapse(pa);
}
}
}
}
}
}; | 0 (true)
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_FoldingActionGroup.java |
19 | static class ByteVertex extends Vertex {
private final LongObjectMap<ConcurrentSkipListSet<ByteEntry>> tx;
private final SortedSet<ByteEntry> set;
ByteVertex(long id, LongObjectMap<ConcurrentSkipListSet<ByteEntry>> tx) {
super(id);
this.tx = tx;
this.set = (SortedSet<ByteEntry>) tx.get(id);
}
@Override
public Iterable<Vertex> getNeighbors(final int value) {
// SortedSet<ByteEntry> set = (SortedSet<ByteEntry>) tx.get(id);
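// Keep only entries whose value matches the requested value (when CHECK_VALUE is enabled), then map each remaining entry to a neighbor vertex built from the id stored at byte offset 8 of its key.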
return Iterables.transform(Iterables.filter(set, new Predicate<ByteEntry>() {
@Override
public boolean apply(@Nullable ByteEntry entry) {
return !CHECK_VALUE || entry.value.getInt(0) == value;
}
}), new Function<ByteEntry, Vertex>() {
@Override
public Vertex apply(@Nullable ByteEntry entry) {
return new ByteVertex(entry.key.getLong(8), tx);
}
});
}
} | 0 (true)
| titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java |
679 | private final class EntryIterator implements Iterator<Entry<K, V>> {
private int currentIndex;
private EntryIterator(int currentIndex) {
this.currentIndex = currentIndex;
}
@Override
public boolean hasNext() {
return currentIndex < size();
}
@Override
public Entry<K, V> next() {
if (currentIndex >= size())
throw new NoSuchElementException("Iterator has reached the last element");
final Entry<K, V> entry = getEntry(currentIndex);
currentIndex++;
return entry;
}
@Override
public void remove() {
throw new UnsupportedOperationException("Remove operation is not supported");
}
} | 0 (true)
| core_src_main_java_com_orientechnologies_orient_core_index_hashindex_local_OHashIndexBucket.java |
3,516 | public class MergeContext {
private final DocumentMapper documentMapper;
private final DocumentMapper.MergeFlags mergeFlags;
private final List<String> mergeConflicts = Lists.newArrayList();
public MergeContext(DocumentMapper documentMapper, DocumentMapper.MergeFlags mergeFlags) {
this.documentMapper = documentMapper;
this.mergeFlags = mergeFlags;
}
public DocumentMapper docMapper() {
return documentMapper;
}
public DocumentMapper.MergeFlags mergeFlags() {
return mergeFlags;
}
public void addConflict(String mergeFailure) {
mergeConflicts.add(mergeFailure);
}
public boolean hasConflicts() {
return !mergeConflicts.isEmpty();
}
public String[] buildConflicts() {
return mergeConflicts.toArray(new String[mergeConflicts.size()]);
}
} | 0 (true)
| src_main_java_org_elasticsearch_index_mapper_MergeContext.java |
1,741 | public class CompressorFactory {
private static final LZFCompressor LZF = new LZFCompressor();
private static final Compressor[] compressors;
private static final ImmutableMap<String, Compressor> compressorsByType;
private static Compressor defaultCompressor;
static {
List<Compressor> compressorsX = Lists.newArrayList();
compressorsX.add(LZF);
compressors = compressorsX.toArray(new Compressor[compressorsX.size()]);
MapBuilder<String, Compressor> compressorsByTypeX = MapBuilder.newMapBuilder();
for (Compressor compressor : compressors) {
compressorsByTypeX.put(compressor.type(), compressor);
}
compressorsByType = compressorsByTypeX.immutableMap();
defaultCompressor = LZF;
}
public static synchronized void configure(Settings settings) {
for (Compressor compressor : compressors) {
compressor.configure(settings);
}
String defaultType = settings.get("compress.default.type", "lzf").toLowerCase(Locale.ENGLISH);
boolean found = false;
for (Compressor compressor : compressors) {
if (defaultType.equalsIgnoreCase(compressor.type())) {
defaultCompressor = compressor;
found = true;
break;
}
}
if (!found) {
Loggers.getLogger(CompressorFactory.class).warn("failed to find default type [{}]", defaultType);
}
}
public static synchronized void setDefaultCompressor(Compressor defaultCompressor) {
CompressorFactory.defaultCompressor = defaultCompressor;
}
public static Compressor defaultCompressor() {
return defaultCompressor;
}
public static boolean isCompressed(BytesReference bytes) {
return compressor(bytes) != null;
}
public static boolean isCompressed(byte[] data) {
return compressor(data, 0, data.length) != null;
}
public static boolean isCompressed(byte[] data, int offset, int length) {
return compressor(data, offset, length) != null;
}
public static boolean isCompressed(IndexInput in) throws IOException {
return compressor(in) != null;
}
@Nullable
public static Compressor compressor(BytesReference bytes) {
for (Compressor compressor : compressors) {
if (compressor.isCompressed(bytes)) {
return compressor;
}
}
return null;
}
@Nullable
public static Compressor compressor(byte[] data) {
return compressor(data, 0, data.length);
}
@Nullable
public static Compressor compressor(byte[] data, int offset, int length) {
for (Compressor compressor : compressors) {
if (compressor.isCompressed(data, offset, length)) {
return compressor;
}
}
return null;
}
@Nullable
public static Compressor compressor(ChannelBuffer buffer) {
for (Compressor compressor : compressors) {
if (compressor.isCompressed(buffer)) {
return compressor;
}
}
return null;
}
@Nullable
public static Compressor compressor(IndexInput in) throws IOException {
for (Compressor compressor : compressors) {
if (compressor.isCompressed(in)) {
return compressor;
}
}
return null;
}
public static Compressor compressor(String type) {
return compressorsByType.get(type);
}
/**
* Uncompress the provided data, data can be detected as compressed using {@link #isCompressed(byte[], int, int)}.
*/
public static BytesReference uncompressIfNeeded(BytesReference bytes) throws IOException {
Compressor compressor = compressor(bytes);
if (compressor != null) {
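// Fast path: uncompress directly from the backing array when one is available; otherwise stream-copy through the compressor's StreamInput.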
if (bytes.hasArray()) {
return new BytesArray(compressor.uncompress(bytes.array(), bytes.arrayOffset(), bytes.length()));
}
StreamInput compressed = compressor.streamInput(bytes.streamInput());
BytesStreamOutput bStream = new BytesStreamOutput();
Streams.copy(compressed, bStream);
compressed.close();
return bStream.bytes();
}
return bytes;
}
} | 0 (true)
| src_main_java_org_elasticsearch_common_compress_CompressorFactory.java |
1,603 | Set<SystemLog> sorted = new TreeSet<SystemLog>(new Comparator<SystemLog>() {
public int compare(SystemLog o1, SystemLog o2) {
long thisVal = o1.date;
long anotherVal = o2.date;
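// Order logs by ascending date (equivalent to Long.compare(o1.date, o2.date)).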
return (thisVal < anotherVal ? -1 : (thisVal == anotherVal ? 0 : 1));
}
}); | 0 (true)
| hazelcast_src_main_java_com_hazelcast_logging_SystemLogService.java |
1,804 | interface CreationListener {
void notify(Errors errors);
} | 0 (true)
| src_main_java_org_elasticsearch_common_inject_BindingProcessor.java |
4,680 | final static class MatchAndSort extends QueryCollector {
private final TopScoreDocCollector topDocsCollector;
MatchAndSort(ESLogger logger, PercolateContext context) {
super(logger, context);
// TODO: Use TopFieldCollector.create(...) for ascending and descending scoring?
topDocsCollector = TopScoreDocCollector.create(context.size, false);
}
@Override
public void collect(int doc) throws IOException {
final Query query = getQuery(doc);
if (query == null) {
// log???
return;
}
// run the query
try {
collector.reset();
searcher.search(query, collector);
if (collector.exists()) {
topDocsCollector.collect(doc);
if (facetAndAggregatorCollector != null) {
facetAndAggregatorCollector.collect(doc);
}
}
} catch (IOException e) {
logger.warn("[" + spare.bytes.utf8ToString() + "] failed to execute query", e);
}
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
super.setNextReader(context);
topDocsCollector.setNextReader(context);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
topDocsCollector.setScorer(scorer);
}
TopDocs topDocs() {
return topDocsCollector.topDocs();
}
} | 1 (no label)
| src_main_java_org_elasticsearch_percolator_QueryCollector.java |
2,173 | static class IteratorBasedIterator extends DocIdSetIterator {
final class Item {
public final DocIdSetIterator iter;
public int doc;
public Item(DocIdSetIterator iter) {
this.iter = iter;
this.doc = -1;
}
}
private int _curDoc;
private final Item[] _heap;
private int _size;
private final long cost;
IteratorBasedIterator(DocIdSet[] sets) throws IOException {
_curDoc = -1;
_heap = new Item[sets.length];
_size = 0;
long cost = 0;
for (DocIdSet set : sets) {
DocIdSetIterator iterator = set.iterator();
if (iterator != null) {
_heap[_size++] = new Item(iterator);
cost += iterator.cost();
}
}
this.cost = cost;
if (_size == 0) _curDoc = DocIdSetIterator.NO_MORE_DOCS;
}
@Override
public final int docID() {
return _curDoc;
}
@Override
public final int nextDoc() throws IOException {
if (_curDoc == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
Item top = _heap[0];
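// Advance the heap root (the sub-iterator currently at the smallest doc) and re-heapify until the smallest doc id moves past _curDoc; that id is the next doc present in any of the underlying sets.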
while (true) {
DocIdSetIterator topIter = top.iter;
int docid;
if ((docid = topIter.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
top.doc = docid;
heapAdjust();
} else {
heapRemoveRoot();
if (_size == 0) return (_curDoc = DocIdSetIterator.NO_MORE_DOCS);
}
top = _heap[0];
int topDoc = top.doc;
if (topDoc > _curDoc) {
return (_curDoc = topDoc);
}
}
}
@Override
public final int advance(int target) throws IOException {
if (_curDoc == DocIdSetIterator.NO_MORE_DOCS) return DocIdSetIterator.NO_MORE_DOCS;
if (target <= _curDoc) target = _curDoc + 1;
Item top = _heap[0];
while (true) {
DocIdSetIterator topIter = top.iter;
int docid;
if ((docid = topIter.advance(target)) != DocIdSetIterator.NO_MORE_DOCS) {
top.doc = docid;
heapAdjust();
} else {
heapRemoveRoot();
if (_size == 0) return (_curDoc = DocIdSetIterator.NO_MORE_DOCS);
}
top = _heap[0];
int topDoc = top.doc;
if (topDoc >= target) {
return (_curDoc = topDoc);
}
}
}
// Organize subScorers into a min heap with scorers generating the earliest document on top.
/*
private final void heapify() {
int size = _size;
for (int i=(size>>1)-1; i>=0; i--)
heapAdjust(i);
}
*/
/* The subtree of subScorers at root is a min heap except possibly for its root element.
* Bubble the root down as required to make the subtree a heap.
*/
private final void heapAdjust() {
final Item[] heap = _heap;
final Item top = heap[0];
final int doc = top.doc;
final int size = _size;
int i = 0;
while (true) {
int lchild = (i << 1) + 1;
if (lchild >= size) break;
Item left = heap[lchild];
int ldoc = left.doc;
int rchild = lchild + 1;
if (rchild < size) {
Item right = heap[rchild];
int rdoc = right.doc;
if (rdoc <= ldoc) {
if (doc <= rdoc) break;
heap[i] = right;
i = rchild;
continue;
}
}
if (doc <= ldoc) break;
heap[i] = left;
i = lchild;
}
heap[i] = top;
}
// Remove the root Scorer from subScorers and re-establish it as a heap
private void heapRemoveRoot() {
_size--;
if (_size > 0) {
Item tmp = _heap[0];
_heap[0] = _heap[_size];
_heap[_size] = tmp; // keep the finished iterator at the end for debugging
heapAdjust();
}
}
@Override
public long cost() {
return cost;
}
} | 0 (true)
| src_main_java_org_elasticsearch_common_lucene_docset_OrDocIdSet.java |
3,134 | public class QueueIterator<E> implements Iterator<E> {
private final Iterator<Data> iterator;
private final SerializationService serializationService;
private final boolean binary;
public QueueIterator(Iterator<Data> iterator, SerializationService serializationService, boolean binary) {
this.iterator = iterator;
this.serializationService = serializationService;
this.binary = binary;
}
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public E next() {
Data data = iterator.next();
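// In binary mode return the raw serialized Data; otherwise deserialize it into the expected element type.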
if (binary) {
return (E) data;
}
return (E) serializationService.toObject(data);
}
@Override
public void remove() {
iterator.remove();
}
} | 1 (no label)
| hazelcast_src_main_java_com_hazelcast_queue_proxy_QueueIterator.java |
158 | private final Function<String, Locker> ASTYANAX_RECIPE_LOCKER_CREATOR = new Function<String, Locker>() {
@Override
public Locker apply(String lockerName) {
String expectedManagerName = "com.thinkaurelius.titan.diskstorage.cassandra.astyanax.AstyanaxStoreManager";
String actualManagerName = storeManager.getClass().getCanonicalName();
// Require AstyanaxStoreManager
Preconditions.checkArgument(expectedManagerName.equals(actualManagerName),
"Astyanax Recipe locker is only supported with the Astyanax storage backend (configured:"
+ actualManagerName + " != required:" + expectedManagerName + ")");
try {
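// Reflectively look up and invoke openLocker(String) on the store manager to obtain the Locker instance.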
Class<?> c = storeManager.getClass();
Method method = c.getMethod("openLocker", String.class);
Object o = method.invoke(storeManager, lockerName);
return (Locker) o;
} catch (NoSuchMethodException e) {
throw new IllegalArgumentException("Could not find method when configuring locking with Astyanax Recipes");
} catch (IllegalAccessException e) {
throw new IllegalArgumentException("Could not access method when configuring locking with Astyanax Recipes", e);
} catch (InvocationTargetException e) {
throw new IllegalArgumentException("Could not invoke method when configuring locking with Astyanax Recipes", e);
}
}
}; | 0 (true)
| titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_Backend.java |
235 | .registerHookValue(profilerPrefix + "enabled", "Cache enabled", METRIC_TYPE.ENABLED, new OProfilerHookValue() {
public Object getValue() {
return isEnabled();
}
}, profilerMetadataPrefix + "enabled"); | 0 (true)
| core_src_main_java_com_orientechnologies_orient_core_cache_OAbstractRecordCache.java |
540 | public class DeleteMappingRequest extends AcknowledgedRequest<DeleteMappingRequest> {
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false);
private String[] types;
DeleteMappingRequest() {
}
/**
* Constructs a new delete mapping request against one or more indices. If nothing is set then
* it will be executed against all indices.
*/
public DeleteMappingRequest(String... indices) {
this.indices = indices;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (CollectionUtils.isEmpty(types)) {
validationException = addValidationError("mapping type is missing", validationException);
} else {
validationException = checkForEmptyString(validationException, types);
}
if (CollectionUtils.isEmpty(indices)) {
validationException = addValidationError("index is missing", validationException);
} else {
validationException = checkForEmptyString(validationException, indices);
}
return validationException;
}
private ActionRequestValidationException checkForEmptyString(ActionRequestValidationException validationException, String[] strings) {
boolean containsEmptyString = false;
for (String string : strings) {
if (!Strings.hasText(string)) {
containsEmptyString = true;
}
}
if (containsEmptyString) {
validationException = addValidationError("types must not contain empty strings", validationException);
}
return validationException;
}
/**
* Sets the indices this delete mapping operation will execute on.
*/
public DeleteMappingRequest indices(String[] indices) {
this.indices = indices;
return this;
}
/**
* The indices the mappings will be removed from.
*/
public String[] indices() {
return indices;
}
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public DeleteMappingRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* The mapping types.
*/
public String[] types() {
return types;
}
/**
* The type of the mappings to remove.
*/
public DeleteMappingRequest types(String... types) {
this.types = types;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
types = in.readStringArray();
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
out.writeStringArrayNullable(types);
writeTimeout(out);
}
} | 0true
| src_main_java_org_elasticsearch_action_admin_indices_mapping_delete_DeleteMappingRequest.java |
560 | public class RandomLB extends AbstractLoadBalancer {
private final Random random = new Random();
@Override
public Member next() {
Member[] members = getMembers();
if (members == null || members.length == 0) {
return null;
}
int index = random.nextInt(members.length);
return members[index];
}
} | 0 (true)
| hazelcast-client_src_main_java_com_hazelcast_client_util_RandomLB.java |
2,998 | public static class FilterCacheValueWeigher implements Weigher<WeightedFilterCache.FilterCacheKey, DocIdSet> {
@Override
public int weigh(FilterCacheKey key, DocIdSet value) {
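// Weigh an entry by the DocIdSet's estimated size in bytes, capped at Integer.MAX_VALUE, and report at least 1 so the cache never records a zero-weight entry.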
int weight = (int) Math.min(DocIdSets.sizeInBytes(value), Integer.MAX_VALUE);
return weight == 0 ? 1 : weight;
}
} | 0 (true)
| src_main_java_org_elasticsearch_index_cache_filter_weighted_WeightedFilterCache.java |
157 | public class ConcurrentLinkedDeque<E>
extends AbstractCollection<E>
implements Deque<E>, java.io.Serializable {
/*
* This is an implementation of a concurrent lock-free deque
* supporting interior removes but not interior insertions, as
* required to support the entire Deque interface.
*
* We extend the techniques developed for ConcurrentLinkedQueue and
* LinkedTransferQueue (see the internal docs for those classes).
* Understanding the ConcurrentLinkedQueue implementation is a
* prerequisite for understanding the implementation of this class.
*
* The data structure is a symmetrical doubly-linked "GC-robust"
* linked list of nodes. We minimize the number of volatile writes
* using two techniques: advancing multiple hops with a single CAS
* and mixing volatile and non-volatile writes of the same memory
* locations.
*
* A node contains the expected E ("item") and links to predecessor
* ("prev") and successor ("next") nodes:
*
* class Node<E> { volatile Node<E> prev, next; volatile E item; }
*
* A node p is considered "live" if it contains a non-null item
* (p.item != null). When an item is CASed to null, the item is
* atomically logically deleted from the collection.
*
* At any time, there is precisely one "first" node with a null
* prev reference that terminates any chain of prev references
* starting at a live node. Similarly there is precisely one
* "last" node terminating any chain of next references starting at
* a live node. The "first" and "last" nodes may or may not be live.
* The "first" and "last" nodes are always mutually reachable.
*
* A new element is added atomically by CASing the null prev or
* next reference in the first or last node to a fresh node
* containing the element. The element's node atomically becomes
* "live" at that point.
*
* A node is considered "active" if it is a live node, or the
* first or last node. Active nodes cannot be unlinked.
*
* A "self-link" is a next or prev reference that is the same node:
* p.prev == p or p.next == p
* Self-links are used in the node unlinking process. Active nodes
* never have self-links.
*
* A node p is active if and only if:
*
* p.item != null ||
* (p.prev == null && p.next != p) ||
* (p.next == null && p.prev != p)
*
* The deque object has two node references, "head" and "tail".
* The head and tail are only approximations to the first and last
* nodes of the deque. The first node can always be found by
* following prev pointers from head; likewise for tail. However,
* it is permissible for head and tail to be referring to deleted
* nodes that have been unlinked and so may not be reachable from
* any live node.
*
* There are 3 stages of node deletion;
* "logical deletion", "unlinking", and "gc-unlinking".
*
* 1. "logical deletion" by CASing item to null atomically removes
* the element from the collection, and makes the containing node
* eligible for unlinking.
*
* 2. "unlinking" makes a deleted node unreachable from active
* nodes, and thus eventually reclaimable by GC. Unlinked nodes
* may remain reachable indefinitely from an iterator.
*
* Physical node unlinking is merely an optimization (albeit a
* critical one), and so can be performed at our convenience. At
* any time, the set of live nodes maintained by prev and next
* links are identical, that is, the live nodes found via next
* links from the first node is equal to the elements found via
* prev links from the last node. However, this is not true for
* nodes that have already been logically deleted - such nodes may
* be reachable in one direction only.
*
* 3. "gc-unlinking" takes unlinking further by making active
* nodes unreachable from deleted nodes, making it easier for the
* GC to reclaim future deleted nodes. This step makes the data
* structure "gc-robust", as first described in detail by Boehm
* (http://portal.acm.org/citation.cfm?doid=503272.503282).
*
* GC-unlinked nodes may remain reachable indefinitely from an
* iterator, but unlike unlinked nodes, are never reachable from
* head or tail.
*
* Making the data structure GC-robust will eliminate the risk of
* unbounded memory retention with conservative GCs and is likely
* to improve performance with generational GCs.
*
* When a node is dequeued at either end, e.g. via poll(), we would
* like to break any references from the node to active nodes. We
* develop further the use of self-links that was very effective in
* other concurrent collection classes. The idea is to replace
* prev and next pointers with special values that are interpreted
* to mean off-the-list-at-one-end. These are approximations, but
* good enough to preserve the properties we want in our
* traversals, e.g. we guarantee that a traversal will never visit
* the same element twice, but we don't guarantee whether a
* traversal that runs out of elements will be able to see more
* elements later after enqueues at that end. Doing gc-unlinking
* safely is particularly tricky, since any node can be in use
* indefinitely (for example by an iterator). We must ensure that
* the nodes pointed at by head/tail never get gc-unlinked, since
* head/tail are needed to get "back on track" by other nodes that
* are gc-unlinked. gc-unlinking accounts for much of the
* implementation complexity.
*
* Since neither unlinking nor gc-unlinking are necessary for
* correctness, there are many implementation choices regarding
* frequency (eagerness) of these operations. Since volatile
* reads are likely to be much cheaper than CASes, saving CASes by
* unlinking multiple adjacent nodes at a time may be a win.
* gc-unlinking can be performed rarely and still be effective,
* since it is most important that long chains of deleted nodes
* are occasionally broken.
*
* The actual representation we use is that p.next == p means to
* goto the first node (which in turn is reached by following prev
* pointers from head), and p.next == null && p.prev == p means
* that the iteration is at an end and that p is a (static final)
* dummy node, NEXT_TERMINATOR, and not the last active node.
* Finishing the iteration when encountering such a TERMINATOR is
* good enough for read-only traversals, so such traversals can use
* p.next == null as the termination condition. When we need to
* find the last (active) node, for enqueueing a new node, we need
* to check whether we have reached a TERMINATOR node; if so,
* restart traversal from tail.
*
* The implementation is completely directionally symmetrical,
* except that most public methods that iterate through the list
* follow next pointers ("forward" direction).
*
* We believe (without full proof) that all single-element deque
* operations (e.g., addFirst, peekLast, pollLast) are linearizable
* (see Herlihy and Shavit's book). However, some combinations of
* operations are known not to be linearizable. In particular,
* when an addFirst(A) is racing with pollFirst() removing B, it is
* possible for an observer iterating over the elements to observe
* A B C and subsequently observe A C, even though no interior
* removes are ever performed. Nevertheless, iterators behave
* reasonably, providing the "weakly consistent" guarantees.
*
* Empirically, microbenchmarks suggest that this class adds about
* 40% overhead relative to ConcurrentLinkedQueue, which feels as
* good as we can hope for.
*/
private static final long serialVersionUID = 876323262645176354L;
/**
* A node from which the first node on list (that is, the unique node p
* with p.prev == null && p.next != p) can be reached in O(1) time.
* Invariants:
* - the first node is always O(1) reachable from head via prev links
* - all live nodes are reachable from the first node via succ()
* - head != null
* - (tmp = head).next != tmp || tmp != head
* - head is never gc-unlinked (but may be unlinked)
* Non-invariants:
* - head.item may or may not be null
* - head may not be reachable from the first or last node, or from tail
*/
private transient volatile Node<E> head;
/**
* A node from which the last node on list (that is, the unique node p
* with p.next == null && p.prev != p) can be reached in O(1) time.
* Invariants:
* - the last node is always O(1) reachable from tail via next links
* - all live nodes are reachable from the last node via pred()
* - tail != null
* - tail is never gc-unlinked (but may be unlinked)
* Non-invariants:
* - tail.item may or may not be null
* - tail may not be reachable from the first or last node, or from head
*/
private transient volatile Node<E> tail;
private static final Node<Object> PREV_TERMINATOR, NEXT_TERMINATOR;
@SuppressWarnings("unchecked")
Node<E> prevTerminator() {
return (Node<E>) PREV_TERMINATOR;
}
@SuppressWarnings("unchecked")
Node<E> nextTerminator() {
return (Node<E>) NEXT_TERMINATOR;
}
static final class Node<E> {
volatile Node<E> prev;
volatile E item;
volatile Node<E> next;
Node() { // default constructor for NEXT_TERMINATOR, PREV_TERMINATOR
}
/**
* Constructs a new node. Uses relaxed write because item can
* only be seen after publication via casNext or casPrev.
*/
Node(E item) {
UNSAFE.putObject(this, itemOffset, item);
}
boolean casItem(E cmp, E val) {
return UNSAFE.compareAndSwapObject(this, itemOffset, cmp, val);
}
void lazySetNext(Node<E> val) {
UNSAFE.putOrderedObject(this, nextOffset, val);
}
boolean casNext(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, nextOffset, cmp, val);
}
void lazySetPrev(Node<E> val) {
UNSAFE.putOrderedObject(this, prevOffset, val);
}
boolean casPrev(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, prevOffset, cmp, val);
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long prevOffset;
private static final long itemOffset;
private static final long nextOffset;
static {
try {
UNSAFE = getUnsafe();
Class<?> k = Node.class;
prevOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("prev"));
itemOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("item"));
nextOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("next"));
} catch (Exception e) {
throw new Error(e);
}
}
}
/**
* Links e as first element.
*/
private void linkFirst(E e) {
checkNotNull(e);
final Node<E> newNode = new Node<E>(e);
restartFromHead:
for (;;)
for (Node<E> h = head, p = h, q;;) {
if ((q = p.prev) != null &&
(q = (p = q).prev) != null)
// Check for head updates every other hop.
// If p == q, we are sure to follow head instead.
p = (h != (h = head)) ? h : q;
else if (p.next == p) // PREV_TERMINATOR
continue restartFromHead;
else {
// p is first node
newNode.lazySetNext(p); // CAS piggyback
if (p.casPrev(null, newNode)) {
// Successful CAS is the linearization point
// for e to become an element of this deque,
// and for newNode to become "live".
if (p != h) // hop two nodes at a time
casHead(h, newNode); // Failure is OK.
return;
}
// Lost CAS race to another thread; re-read prev
}
}
}
/**
* Links e as last element.
*/
private void linkLast(E e) {
checkNotNull(e);
final Node<E> newNode = new Node<E>(e);
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p.prev == p) // NEXT_TERMINATOR
continue restartFromTail;
else {
// p is last node
newNode.lazySetPrev(p); // CAS piggyback
if (p.casNext(null, newNode)) {
// Successful CAS is the linearization point
// for e to become an element of this deque,
// and for newNode to become "live".
if (p != t) // hop two nodes at a time
casTail(t, newNode); // Failure is OK.
return;
}
// Lost CAS race to another thread; re-read next
}
}
}
private static final int HOPS = 2;
/**
* Unlinks non-null node x.
*/
void unlink(Node<E> x) {
// assert x != null;
// assert x.item == null;
// assert x != PREV_TERMINATOR;
// assert x != NEXT_TERMINATOR;
final Node<E> prev = x.prev;
final Node<E> next = x.next;
if (prev == null) {
unlinkFirst(x, next);
} else if (next == null) {
unlinkLast(x, prev);
} else {
// Unlink interior node.
//
// This is the common case, since a series of polls at the
// same end will be "interior" removes, except perhaps for
// the first one, since end nodes cannot be unlinked.
//
// At any time, all active nodes are mutually reachable by
// following a sequence of either next or prev pointers.
//
// Our strategy is to find the unique active predecessor
// and successor of x. Try to fix up their links so that
// they point to each other, leaving x unreachable from
// active nodes. If successful, and if x has no live
// predecessor/successor, we additionally try to gc-unlink,
// leaving active nodes unreachable from x, by rechecking
// that the status of predecessor and successor are
// unchanged and ensuring that x is not reachable from
// tail/head, before setting x's prev/next links to their
// logical approximate replacements, self/TERMINATOR.
Node<E> activePred, activeSucc;
boolean isFirst, isLast;
int hops = 1;
// Find active predecessor
for (Node<E> p = prev; ; ++hops) {
if (p.item != null) {
activePred = p;
isFirst = false;
break;
}
Node<E> q = p.prev;
if (q == null) {
if (p.next == p)
return;
activePred = p;
isFirst = true;
break;
}
else if (p == q)
return;
else
p = q;
}
// Find active successor
for (Node<E> p = next; ; ++hops) {
if (p.item != null) {
activeSucc = p;
isLast = false;
break;
}
Node<E> q = p.next;
if (q == null) {
if (p.prev == p)
return;
activeSucc = p;
isLast = true;
break;
}
else if (p == q)
return;
else
p = q;
}
// TODO: better HOP heuristics
if (hops < HOPS
// always squeeze out interior deleted nodes
&& (isFirst | isLast))
return;
// Squeeze out deleted nodes between activePred and
// activeSucc, including x.
skipDeletedSuccessors(activePred);
skipDeletedPredecessors(activeSucc);
// Try to gc-unlink, if possible
if ((isFirst | isLast) &&
// Recheck expected state of predecessor and successor
(activePred.next == activeSucc) &&
(activeSucc.prev == activePred) &&
(isFirst ? activePred.prev == null : activePred.item != null) &&
(isLast ? activeSucc.next == null : activeSucc.item != null)) {
updateHead(); // Ensure x is not reachable from head
updateTail(); // Ensure x is not reachable from tail
// Finally, actually gc-unlink
x.lazySetPrev(isFirst ? prevTerminator() : x);
x.lazySetNext(isLast ? nextTerminator() : x);
}
}
}
/**
* Unlinks non-null first node.
*/
private void unlinkFirst(Node<E> first, Node<E> next) {
// assert first != null;
// assert next != null;
// assert first.item == null;
for (Node<E> o = null, p = next, q;;) {
if (p.item != null || (q = p.next) == null) {
if (o != null && p.prev != p && first.casNext(next, p)) {
skipDeletedPredecessors(p);
if (first.prev == null &&
(p.next == null || p.item != null) &&
p.prev == first) {
updateHead(); // Ensure o is not reachable from head
updateTail(); // Ensure o is not reachable from tail
// Finally, actually gc-unlink
o.lazySetNext(o);
o.lazySetPrev(prevTerminator());
}
}
return;
}
else if (p == q)
return;
else {
o = p;
p = q;
}
}
}
/**
* Unlinks non-null last node.
*/
private void unlinkLast(Node<E> last, Node<E> prev) {
// assert last != null;
// assert prev != null;
// assert last.item == null;
for (Node<E> o = null, p = prev, q;;) {
if (p.item != null || (q = p.prev) == null) {
if (o != null && p.next != p && last.casPrev(prev, p)) {
skipDeletedSuccessors(p);
if (last.next == null &&
(p.prev == null || p.item != null) &&
p.next == last) {
updateHead(); // Ensure o is not reachable from head
updateTail(); // Ensure o is not reachable from tail
// Finally, actually gc-unlink
o.lazySetPrev(o);
o.lazySetNext(nextTerminator());
}
}
return;
}
else if (p == q)
return;
else {
o = p;
p = q;
}
}
}
/**
* Guarantees that any node which was unlinked before a call to
* this method will be unreachable from head after it returns.
* Does not guarantee to eliminate slack, only that head will
* point to a node that was active while this method was running.
*/
private final void updateHead() {
// Either head already points to an active node, or we keep
// trying to cas it to the first node until it does.
Node<E> h, p, q;
restartFromHead:
while ((h = head).item == null && (p = h.prev) != null) {
for (;;) {
if ((q = p.prev) == null ||
(q = (p = q).prev) == null) {
// It is possible that p is PREV_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
if (casHead(h, p))
return;
else
continue restartFromHead;
}
else if (h != head)
continue restartFromHead;
else
p = q;
}
}
}
/**
* Guarantees that any node which was unlinked before a call to
* this method will be unreachable from tail after it returns.
* Does not guarantee to eliminate slack, only that tail will
* point to a node that was active while this method was running.
*/
private final void updateTail() {
// Either tail already points to an active node, or we keep
// trying to cas it to the last node until it does.
Node<E> t, p, q;
restartFromTail:
while ((t = tail).item == null && (p = t.next) != null) {
for (;;) {
if ((q = p.next) == null ||
(q = (p = q).next) == null) {
// It is possible that p is NEXT_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
if (casTail(t, p))
return;
else
continue restartFromTail;
}
else if (t != tail)
continue restartFromTail;
else
p = q;
}
}
}
private void skipDeletedPredecessors(Node<E> x) {
whileActive:
do {
Node<E> prev = x.prev;
// assert prev != null;
// assert x != NEXT_TERMINATOR;
// assert x != PREV_TERMINATOR;
Node<E> p = prev;
findActive:
for (;;) {
if (p.item != null)
break findActive;
Node<E> q = p.prev;
if (q == null) {
if (p.next == p)
continue whileActive;
break findActive;
}
else if (p == q)
continue whileActive;
else
p = q;
}
// found active CAS target
if (prev == p || x.casPrev(prev, p))
return;
} while (x.item != null || x.next == null);
}
private void skipDeletedSuccessors(Node<E> x) {
whileActive:
do {
Node<E> next = x.next;
// assert next != null;
// assert x != NEXT_TERMINATOR;
// assert x != PREV_TERMINATOR;
Node<E> p = next;
findActive:
for (;;) {
if (p.item != null)
break findActive;
Node<E> q = p.next;
if (q == null) {
if (p.prev == p)
continue whileActive;
break findActive;
}
else if (p == q)
continue whileActive;
else
p = q;
}
// found active CAS target
if (next == p || x.casNext(next, p))
return;
} while (x.item != null || x.prev == null);
}
/**
* Returns the successor of p, or the first node if p.next has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node<E> succ(Node<E> p) {
// TODO: should we skip deleted nodes here?
Node<E> q = p.next;
return (p == q) ? first() : q;
}
/**
* Returns the predecessor of p, or the last node if p.prev has been
* linked to self, which will only be true if traversing with a
* stale pointer that is now off the list.
*/
final Node<E> pred(Node<E> p) {
Node<E> q = p.prev;
return (p == q) ? last() : q;
}
/**
* Returns the first node, the unique node p for which:
* p.prev == null && p.next != p
* The returned node may or may not be logically deleted.
* Guarantees that head is set to the returned node.
*/
Node<E> first() {
restartFromHead:
for (;;)
for (Node<E> h = head, p = h, q;;) {
if ((q = p.prev) != null &&
(q = (p = q).prev) != null)
// Check for head updates every other hop.
// If p == q, we are sure to follow head instead.
p = (h != (h = head)) ? h : q;
else if (p == h
// It is possible that p is PREV_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
|| casHead(h, p))
return p;
else
continue restartFromHead;
}
}
/**
* Returns the last node, the unique node p for which:
* p.next == null && p.prev != p
* The returned node may or may not be logically deleted.
* Guarantees that tail is set to the returned node.
*/
Node<E> last() {
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p == t
// It is possible that p is NEXT_TERMINATOR,
// but if so, the CAS is guaranteed to fail.
|| casTail(t, p))
return p;
else
continue restartFromTail;
}
}
// Minor convenience utilities
/**
* Throws NullPointerException if argument is null.
*
* @param v the element
*/
private static void checkNotNull(Object v) {
if (v == null)
throw new NullPointerException();
}
/**
* Returns element unless it is null, in which case throws
* NoSuchElementException.
*
* @param v the element
* @return the element
*/
private E screenNullResult(E v) {
if (v == null)
throw new NoSuchElementException();
return v;
}
/**
* Creates an array list and fills it with elements of this list.
* Used by toArray.
*
* @return the array list
*/
private ArrayList<E> toArrayList() {
ArrayList<E> list = new ArrayList<E>();
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
list.add(item);
}
return list;
}
/**
* Constructs an empty deque.
*/
public ConcurrentLinkedDeque() {
head = tail = new Node<E>(null);
}
/**
* Constructs a deque initially containing the elements of
* the given collection, added in traversal order of the
* collection's iterator.
*
* @param c the collection of elements to initially contain
* @throws NullPointerException if the specified collection or any
* of its elements are null
*/
public ConcurrentLinkedDeque(Collection<? extends E> c) {
// Copy c into a private chain of Nodes
Node<E> h = null, t = null;
for (E e : c) {
checkNotNull(e);
Node<E> newNode = new Node<E>(e);
if (h == null)
h = t = newNode;
else {
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
initHeadTail(h, t);
}
/**
* Initializes head and tail, ensuring invariants hold.
*/
private void initHeadTail(Node<E> h, Node<E> t) {
if (h == t) {
if (h == null)
h = t = new Node<E>(null);
else {
// Avoid edge case of a single Node with non-null item.
Node<E> newNode = new Node<E>(null);
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
head = h;
tail = t;
}
/**
* Inserts the specified element at the front of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException}.
*
* @throws NullPointerException if the specified element is null
*/
public void addFirst(E e) {
linkFirst(e);
}
/**
* Inserts the specified element at the end of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException}.
*
* <p>This method is equivalent to {@link #add}.
*
* @throws NullPointerException if the specified element is null
*/
public void addLast(E e) {
linkLast(e);
}
/**
* Inserts the specified element at the front of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Deque#offerFirst})
* @throws NullPointerException if the specified element is null
*/
public boolean offerFirst(E e) {
linkFirst(e);
return true;
}
/**
* Inserts the specified element at the end of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* <p>This method is equivalent to {@link #add}.
*
* @return {@code true} (as specified by {@link Deque#offerLast})
* @throws NullPointerException if the specified element is null
*/
public boolean offerLast(E e) {
linkLast(e);
return true;
}
public E peekFirst() {
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
return item;
}
return null;
}
public E peekLast() {
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null)
return item;
}
return null;
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E getFirst() {
return screenNullResult(peekFirst());
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E getLast() {
return screenNullResult(peekLast());
}
public E pollFirst() {
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && p.casItem(item, null)) {
unlink(p);
return item;
}
}
return null;
}
public E pollLast() {
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null && p.casItem(item, null)) {
unlink(p);
return item;
}
}
return null;
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E removeFirst() {
return screenNullResult(pollFirst());
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E removeLast() {
return screenNullResult(pollLast());
}
// *** Queue and stack methods ***
/**
* Inserts the specified element at the tail of this deque.
* As the deque is unbounded, this method will never return {@code false}.
*
* @return {@code true} (as specified by {@link Queue#offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e) {
return offerLast(e);
}
/**
* Inserts the specified element at the tail of this deque.
* As the deque is unbounded, this method will never throw
* {@link IllegalStateException} or return {@code false}.
*
* @return {@code true} (as specified by {@link Collection#add})
* @throws NullPointerException if the specified element is null
*/
public boolean add(E e) {
return offerLast(e);
}
public E poll() { return pollFirst(); }
public E remove() { return removeFirst(); }
public E peek() { return peekFirst(); }
public E element() { return getFirst(); }
public void push(E e) { addFirst(e); }
public E pop() { return removeFirst(); }
/**
* Removes the first element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean removeFirstOccurrence(Object o) {
checkNotNull(o);
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && o.equals(item) && p.casItem(item, null)) {
unlink(p);
return true;
}
}
return false;
}
/**
* Removes the last element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean removeLastOccurrence(Object o) {
checkNotNull(o);
for (Node<E> p = last(); p != null; p = pred(p)) {
E item = p.item;
if (item != null && o.equals(item) && p.casItem(item, null)) {
unlink(p);
return true;
}
}
return false;
}
/**
* Returns {@code true} if this deque contains at least one
* element {@code e} such that {@code o.equals(e)}.
*
* @param o element whose presence in this deque is to be tested
* @return {@code true} if this deque contains the specified element
*/
public boolean contains(Object o) {
if (o == null) return false;
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null && o.equals(item))
return true;
}
return false;
}
/**
* Returns {@code true} if this collection contains no elements.
*
* @return {@code true} if this collection contains no elements
*/
public boolean isEmpty() {
return peekFirst() == null;
}
/**
* Returns the number of elements in this deque. If this deque
* contains more than {@code Integer.MAX_VALUE} elements, it
* returns {@code Integer.MAX_VALUE}.
*
* <p>Beware that, unlike in most collections, this method is
* <em>NOT</em> a constant-time operation. Because of the
* asynchronous nature of these deques, determining the current
* number of elements requires traversing them all to count them.
* Additionally, it is possible for the size to change during
* execution of this method, in which case the returned result
* will be inaccurate. Thus, this method is typically not very
* useful in concurrent applications.
*
* @return the number of elements in this deque
*/
public int size() {
int count = 0;
for (Node<E> p = first(); p != null; p = succ(p))
if (p.item != null)
// Collection.size() spec says to max out
if (++count == Integer.MAX_VALUE)
break;
return count;
}
/**
* Removes the first element {@code e} such that
* {@code o.equals(e)}, if such an element exists in this deque.
* If the deque does not contain the element, it is unchanged.
*
* @param o element to be removed from this deque, if present
* @return {@code true} if the deque contained the specified element
* @throws NullPointerException if the specified element is null
*/
public boolean remove(Object o) {
return removeFirstOccurrence(o);
}
/**
* Appends all of the elements in the specified collection to the end of
* this deque, in the order that they are returned by the specified
* collection's iterator. Attempts to {@code addAll} of a deque to
* itself result in {@code IllegalArgumentException}.
*
* @param c the elements to be inserted into this deque
* @return {@code true} if this deque changed as a result of the call
* @throws NullPointerException if the specified collection or any
* of its elements are null
* @throws IllegalArgumentException if the collection is this deque
*/
public boolean addAll(Collection<? extends E> c) {
if (c == this)
// As historically specified in AbstractQueue#addAll
throw new IllegalArgumentException();
// Copy c into a private chain of Nodes
Node<E> beginningOfTheEnd = null, last = null;
for (E e : c) {
checkNotNull(e);
Node<E> newNode = new Node<E>(e);
if (beginningOfTheEnd == null)
beginningOfTheEnd = last = newNode;
else {
last.lazySetNext(newNode);
newNode.lazySetPrev(last);
last = newNode;
}
}
if (beginningOfTheEnd == null)
return false;
// Atomically append the chain at the tail of this collection
restartFromTail:
for (;;)
for (Node<E> t = tail, p = t, q;;) {
if ((q = p.next) != null &&
(q = (p = q).next) != null)
// Check for tail updates every other hop.
// If p == q, we are sure to follow tail instead.
p = (t != (t = tail)) ? t : q;
else if (p.prev == p) // NEXT_TERMINATOR
continue restartFromTail;
else {
// p is last node
beginningOfTheEnd.lazySetPrev(p); // CAS piggyback
if (p.casNext(null, beginningOfTheEnd)) {
// Successful CAS is the linearization point
// for all elements to be added to this deque.
if (!casTail(t, last)) {
// Try a little harder to update tail,
// since we may be adding many elements.
t = tail;
if (last.next == null)
casTail(t, last);
}
return true;
}
// Lost CAS race to another thread; re-read next
}
}
}
/**
* Removes all of the elements from this deque.
*/
public void clear() {
while (pollFirst() != null)
;
}
/**
* Returns an array containing all of the elements in this deque, in
* proper sequence (from first to last element).
*
* <p>The returned array will be "safe" in that no references to it are
* maintained by this deque. (In other words, this method must allocate
* a new array). The caller is thus free to modify the returned array.
*
* <p>This method acts as bridge between array-based and collection-based
* APIs.
*
* @return an array containing all of the elements in this deque
*/
public Object[] toArray() {
return toArrayList().toArray();
}
/**
* Returns an array containing all of the elements in this deque,
* in proper sequence (from first to last element); the runtime
* type of the returned array is that of the specified array. If
* the deque fits in the specified array, it is returned therein.
* Otherwise, a new array is allocated with the runtime type of
* the specified array and the size of this deque.
*
* <p>If this deque fits in the specified array with room to spare
* (i.e., the array has more elements than this deque), the element in
* the array immediately following the end of the deque is set to
* {@code null}.
*
* <p>Like the {@link #toArray()} method, this method acts as
* bridge between array-based and collection-based APIs. Further,
* this method allows precise control over the runtime type of the
* output array, and may, under certain circumstances, be used to
* save allocation costs.
*
* <p>Suppose {@code x} is a deque known to contain only strings.
* The following code can be used to dump the deque into a newly
* allocated array of {@code String}:
*
* <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
*
* Note that {@code toArray(new Object[0])} is identical in function to
* {@code toArray()}.
*
* @param a the array into which the elements of the deque are to
* be stored, if it is big enough; otherwise, a new array of the
* same runtime type is allocated for this purpose
* @return an array containing all of the elements in this deque
* @throws ArrayStoreException if the runtime type of the specified array
* is not a supertype of the runtime type of every element in
* this deque
* @throws NullPointerException if the specified array is null
*/
public <T> T[] toArray(T[] a) {
return toArrayList().toArray(a);
}
/**
* Returns an iterator over the elements in this deque in proper sequence.
* The elements will be returned in order from first (head) to last (tail).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this deque in proper sequence
*/
public Iterator<E> iterator() {
return new Itr();
}
/**
* Returns an iterator over the elements in this deque in reverse
* sequential order. The elements will be returned in order from
* last (tail) to first (head).
*
* <p>The returned iterator is a "weakly consistent" iterator that
* will never throw {@link java.util.ConcurrentModificationException
* ConcurrentModificationException}, and guarantees to traverse
* elements as they existed upon construction of the iterator, and
* may (but is not guaranteed to) reflect any modifications
* subsequent to construction.
*
* @return an iterator over the elements in this deque in reverse order
*/
public Iterator<E> descendingIterator() {
return new DescendingItr();
}
private abstract class AbstractItr implements Iterator<E> {
/**
* Next node to return item for.
*/
private Node<E> nextNode;
/**
* nextItem holds on to item fields because once we claim
* that an element exists in hasNext(), we must return it in
* the following next() call even if it was in the process of
* being removed when hasNext() was called.
*/
private E nextItem;
/**
* Node returned by most recent call to next. Needed by remove.
* Reset to null if this element is deleted by a call to remove.
*/
private Node<E> lastRet;
abstract Node<E> startNode();
abstract Node<E> nextNode(Node<E> p);
AbstractItr() {
advance();
}
/**
* Sets nextNode and nextItem to next valid node, or to null
* if no such.
*/
private void advance() {
lastRet = nextNode;
Node<E> p = (nextNode == null) ? startNode() : nextNode(nextNode);
for (;; p = nextNode(p)) {
if (p == null) {
// p might be active end or TERMINATOR node; both are OK
nextNode = null;
nextItem = null;
break;
}
E item = p.item;
if (item != null) {
nextNode = p;
nextItem = item;
break;
}
}
}
public boolean hasNext() {
return nextItem != null;
}
public E next() {
E item = nextItem;
if (item == null) throw new NoSuchElementException();
advance();
return item;
}
public void remove() {
Node<E> l = lastRet;
if (l == null) throw new IllegalStateException();
l.item = null;
unlink(l);
lastRet = null;
}
}
/** Forward iterator */
private class Itr extends AbstractItr {
Node<E> startNode() { return first(); }
Node<E> nextNode(Node<E> p) { return succ(p); }
}
/** Descending iterator */
private class DescendingItr extends AbstractItr {
Node<E> startNode() { return last(); }
Node<E> nextNode(Node<E> p) { return pred(p); }
}
/**
* Saves the state to a stream (that is, serializes it).
*
* @serialData All of the elements (each an {@code E}) in
* the proper order, followed by a null
* @param s the stream
*/
private void writeObject(java.io.ObjectOutputStream s)
throws java.io.IOException {
// Write out any hidden stuff
s.defaultWriteObject();
// Write out all elements in the proper order.
for (Node<E> p = first(); p != null; p = succ(p)) {
E item = p.item;
if (item != null)
s.writeObject(item);
}
// Use trailing null as sentinel
s.writeObject(null);
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
*/
private void readObject(java.io.ObjectInputStream s)
throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
// Read in elements until trailing null sentinel found
Node<E> h = null, t = null;
Object item;
while ((item = s.readObject()) != null) {
@SuppressWarnings("unchecked")
Node<E> newNode = new Node<E>((E) item);
if (h == null)
h = t = newNode;
else {
t.lazySetNext(newNode);
newNode.lazySetPrev(t);
t = newNode;
}
}
initHeadTail(h, t);
}
private boolean casHead(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, headOffset, cmp, val);
}
private boolean casTail(Node<E> cmp, Node<E> val) {
return UNSAFE.compareAndSwapObject(this, tailOffset, cmp, val);
}
// Unsafe mechanics
private static final sun.misc.Unsafe UNSAFE;
private static final long headOffset;
private static final long tailOffset;
static {
PREV_TERMINATOR = new Node<Object>();
PREV_TERMINATOR.next = PREV_TERMINATOR;
NEXT_TERMINATOR = new Node<Object>();
NEXT_TERMINATOR.prev = NEXT_TERMINATOR;
try {
UNSAFE = getUnsafe();
Class<?> k = ConcurrentLinkedDeque.class;
headOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("head"));
tailOffset = UNSAFE.objectFieldOffset
(k.getDeclaredField("tail"));
} catch (Exception e) {
throw new Error(e);
}
}
/**
* Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package.
* Replace with a simple call to Unsafe.getUnsafe when integrating
* into a jdk.
*
* @return a sun.misc.Unsafe
*/
static sun.misc.Unsafe getUnsafe() {
try {
return sun.misc.Unsafe.getUnsafe();
} catch (SecurityException tryReflectionInstead) {}
try {
return java.security.AccessController.doPrivileged
(new java.security.PrivilegedExceptionAction<sun.misc.Unsafe>() {
public sun.misc.Unsafe run() throws Exception {
Class<sun.misc.Unsafe> k = sun.misc.Unsafe.class;
for (java.lang.reflect.Field f : k.getDeclaredFields()) {
f.setAccessible(true);
Object x = f.get(null);
if (k.isInstance(x))
return k.cast(x);
}
throw new NoSuchFieldError("the Unsafe");
}});
} catch (java.security.PrivilegedActionException e) {
throw new RuntimeException("Could not initialize intrinsics",
e.getCause());
}
}
} | 0true
| src_main_java_jsr166y_ConcurrentLinkedDeque.java |
126 | public enum METRIC_TYPE {
CHRONO, COUNTER, STAT, SIZE, ENABLED, TEXT
} | 0true
| commons_src_main_java_com_orientechnologies_common_profiler_OProfilerMBean.java |
2,642 | transportService.sendRequest(requestingNode, MulticastPingResponseRequestHandler.ACTION, multicastPingResponse, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
public void handleException(TransportException exp) {
logger.warn("failed to receive confirmation on sent ping response to [{}]", exp, requestingNode);
}
}); | 0true
| src_main_java_org_elasticsearch_discovery_zen_ping_multicast_MulticastZenPing.java |
918 | public class PlainActionFuture<T> extends AdapterActionFuture<T, T> {
public static <T> PlainActionFuture<T> newFuture() {
return new PlainActionFuture<T>();
}
@Override
protected T convert(T listenerResponse) {
return listenerResponse;
}
} | 0true
| src_main_java_org_elasticsearch_action_support_PlainActionFuture.java |
1,596 | public class ThrottlingAllocationDecider extends AllocationDecider {
public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries";
public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries";
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
private volatile int primariesInitialRecoveries;
private volatile int concurrentRecoveries;
@Inject
public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES);
this.concurrentRecoveries = settings.getAsInt("cluster.routing.allocation.concurrent_recoveries", settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES));
logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries);
nodeSettingsService.addListener(new ApplySettings());
}
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (shardRouting.primary()) {
assert shardRouting.unassigned() || shardRouting.active();
if (shardRouting.unassigned()) {
// primary is unassigned, means we are going to do recovery from gateway
// count *just the primary* currently doing recovery on the node and check against concurrent_recoveries
int primariesInRecovery = 0;
for (MutableShardRouting shard : node) {
// when a primary shard is INITIALIZING, it can be because of *initial recovery* or *relocation from another node*
// we only count initial recoveries here, so we need to make sure that relocating node is null
if (shard.state() == ShardRoutingState.INITIALIZING && shard.primary() && shard.relocatingNodeId() == null) {
primariesInRecovery++;
}
}
if (primariesInRecovery >= primariesInitialRecoveries) {
return allocation.decision(Decision.THROTTLE, "too many primaries currently recovering [%d], limit: [%d]",
primariesInRecovery, primariesInitialRecoveries);
} else {
return allocation.decision(Decision.YES, "below primary recovery limit of [%d]", primariesInitialRecoveries);
}
}
}
// either primary or replica doing recovery (from peer shard)
// count the number of recoveries on the node, its for both target (INITIALIZING) and source (RELOCATING)
return canAllocate(node, allocation);
}
public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
int currentRecoveries = 0;
for (MutableShardRouting shard : node) {
if (shard.state() == ShardRoutingState.INITIALIZING || shard.state() == ShardRoutingState.RELOCATING) {
currentRecoveries++;
}
}
if (currentRecoveries >= concurrentRecoveries) {
return allocation.decision(Decision.THROTTLE, "too many shards currently recovering [%d], limit: [%d]",
currentRecoveries, concurrentRecoveries);
} else {
return allocation.decision(Decision.YES, "below shard recovery limit of [%d]", concurrentRecoveries);
}
}
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries);
if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) {
logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries);
ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries;
}
int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries);
if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) {
logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries);
ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries;
}
}
}
} | 1no label
| src_main_java_org_elasticsearch_cluster_routing_allocation_decider_ThrottlingAllocationDecider.java |
274 | private final class GotoListener implements KeyListener {
@Override
public void keyReleased(KeyEvent e) {}
@Override
public void keyPressed(KeyEvent e) {
if (e.character == 0x1B) // ESC
dispose();
if (EditorUtil.triggersBinding(e, getCommandBinding())) {
e.doit=false;
dispose();
gotoNode(referencedNode, pc.getProject());
}
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_editor_PeekDefinitionPopup.java |
683 | public interface CategoryProductXref extends Serializable {
/**
* Gets the category.
*
* @return the category
*/
Category getCategory();
/**
* Sets the category.
*
* @param category the new category
*/
void setCategory(Category category);
/**
* Gets the product.
*
* @return the product
*/
Product getProduct();
/**
* Sets the product.
*
* @param product the new product
*/
void setProduct(Product product);
/**
* Gets the display order.
*
* @return the display order
*/
Long getDisplayOrder();
/**
* Sets the display order.
*
* @param displayOrder the new display order
*/
void setDisplayOrder(Long displayOrder);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_CategoryProductXref.java |
1,387 | @XmlRootElement(name = "categories")
@XmlAccessorType(value = XmlAccessType.FIELD)
public class CategoriesWrapper extends BaseWrapper implements APIWrapper<List<Category>> {
@XmlElement(name = "category")
protected List<CategoryWrapper> categories = new ArrayList<CategoryWrapper>();
@Override
public void wrapDetails(List<Category> cats, HttpServletRequest request) {
for (Category category : cats) {
CategoryWrapper wrapper = (CategoryWrapper) context.getBean(CategoryWrapper.class.getName());
wrapper.wrapSummary(category, request);
categories.add(wrapper);
}
}
@Override
public void wrapSummary(List<Category> cats, HttpServletRequest request) {
wrapDetails(cats, request);
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_api_wrapper_CategoriesWrapper.java |
822 | public class MultiSearchRequestTests extends ElasticsearchTestCase {
@Test
public void simpleAdd() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json");
MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
assertThat(request.requests().size(), equalTo(5));
assertThat(request.requests().get(0).indices()[0], equalTo("test"));
assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true)));
assertThat(request.requests().get(0).types().length, equalTo(0));
assertThat(request.requests().get(1).indices()[0], equalTo("test"));
assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true)));
assertThat(request.requests().get(1).types()[0], equalTo("type1"));
assertThat(request.requests().get(2).indices(), nullValue());
assertThat(request.requests().get(2).types().length, equalTo(0));
assertThat(request.requests().get(3).indices(), nullValue());
assertThat(request.requests().get(3).types().length, equalTo(0));
assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
assertThat(request.requests().get(4).indices(), nullValue());
assertThat(request.requests().get(4).types().length, equalTo(0));
}
@Test
public void simpleAdd2() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json");
MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
assertThat(request.requests().size(), equalTo(5));
assertThat(request.requests().get(0).indices()[0], equalTo("test"));
assertThat(request.requests().get(0).types().length, equalTo(0));
assertThat(request.requests().get(1).indices()[0], equalTo("test"));
assertThat(request.requests().get(1).types()[0], equalTo("type1"));
assertThat(request.requests().get(2).indices(), nullValue());
assertThat(request.requests().get(2).types().length, equalTo(0));
assertThat(request.requests().get(3).indices(), nullValue());
assertThat(request.requests().get(3).types().length, equalTo(0));
assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
assertThat(request.requests().get(4).indices(), nullValue());
assertThat(request.requests().get(4).types().length, equalTo(0));
}
@Test
public void simpleAdd3() throws Exception {
byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json");
MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null);
assertThat(request.requests().size(), equalTo(4));
assertThat(request.requests().get(0).indices()[0], equalTo("test0"));
assertThat(request.requests().get(0).indices()[1], equalTo("test1"));
assertThat(request.requests().get(1).indices()[0], equalTo("test2"));
assertThat(request.requests().get(1).indices()[1], equalTo("test3"));
assertThat(request.requests().get(1).types()[0], equalTo("type1"));
assertThat(request.requests().get(2).indices()[0], equalTo("test4"));
assertThat(request.requests().get(2).indices()[1], equalTo("test1"));
assertThat(request.requests().get(2).types()[0], equalTo("type2"));
assertThat(request.requests().get(2).types()[1], equalTo("type1"));
assertThat(request.requests().get(3).indices(), nullValue());
assertThat(request.requests().get(3).types().length, equalTo(0));
assertThat(request.requests().get(3).searchType(), equalTo(SearchType.COUNT));
}
} | 0true
| src_test_java_org_elasticsearch_action_search_MultiSearchRequestTests.java |
1,472 | public class BroadleafSocialRegisterController extends BroadleafRegisterController {
//Pre-populate portions of the RegisterCustomerForm from ProviderSignInUtils.getConnection();
public String register(RegisterCustomerForm registerCustomerForm, HttpServletRequest request,
HttpServletResponse response, Model model) {
Connection<?> connection = ProviderSignInUtils.getConnection(new ServletWebRequest(request));
if (connection != null) {
UserProfile userProfile = connection.fetchUserProfile();
Customer customer = registerCustomerForm.getCustomer();
customer.setFirstName(userProfile.getFirstName());
customer.setLastName(userProfile.getLastName());
customer.setEmailAddress(userProfile.getEmail());
if (isUseEmailForLogin()){
customer.setUsername(userProfile.getEmail());
} else {
customer.setUsername(userProfile.getUsername());
}
}
return super.register(registerCustomerForm, request, response, model);
}
//Calls ProviderSignInUtils.handlePostSignUp() after a successful registration
public String processRegister(RegisterCustomerForm registerCustomerForm, BindingResult errors,
HttpServletRequest request, HttpServletResponse response, Model model)
throws ServiceException, PricingException {
if (isUseEmailForLogin()) {
Customer customer = registerCustomerForm.getCustomer();
customer.setUsername(customer.getEmailAddress());
}
registerCustomerValidator.validate(registerCustomerForm, errors, isUseEmailForLogin());
if (!errors.hasErrors()) {
Customer newCustomer = customerService.registerCustomer(registerCustomerForm.getCustomer(),
registerCustomerForm.getPassword(), registerCustomerForm.getPasswordConfirm());
assert(newCustomer != null);
ProviderSignInUtils.handlePostSignUp(newCustomer.getUsername(), new ServletWebRequest(request));
// The next line needs to use the customer from the input form and not the customer returned after registration
// so that we still have the unencoded password for use by the authentication mechanism.
loginService.loginCustomer(registerCustomerForm.getCustomer());
// Need to ensure that the Cart on CartState is owned by the newly registered customer.
Order cart = CartState.getCart();
if (cart != null && !(cart instanceof NullOrderImpl) && cart.getEmailAddress() == null) {
cart.setEmailAddress(newCustomer.getEmailAddress());
orderService.save(cart, false);
}
String redirectUrl = registerCustomerForm.getRedirectUrl();
if (StringUtils.isNotBlank(redirectUrl) && redirectUrl.contains(":")) {
redirectUrl = null;
}
return StringUtils.isBlank(redirectUrl) ? getRegisterSuccessView() : "redirect:" + redirectUrl;
} else {
return getRegisterView();
}
}
} | 0true
| core_broadleaf-framework-web_src_main_java_org_broadleafcommerce_core_web_controller_account_BroadleafSocialRegisterController.java |
3,380 | public static class Builder implements IndexFieldData.Builder {
private NumericType numericType;
public Builder setNumericType(NumericType numericType) {
this.numericType = numericType;
return this;
}
@Override
public IndexFieldData<AtomicNumericFieldData> build(Index index, @IndexSettings Settings indexSettings, FieldMapper<?> mapper,
IndexFieldDataCache cache, CircuitBreakerService breakerService) {
return new PackedArrayIndexFieldData(index, indexSettings, mapper.names(), mapper.fieldDataType(), cache, numericType, breakerService);
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_PackedArrayIndexFieldData.java |
440 | @Deprecated
public @interface AdminPresentationAdornedTargetCollectionOverride {
/**
* The name of the property whose AdminPresentation annotation should be overwritten
*
* @return the name of the property that should be overwritten
*/
String name();
/**
* The AdminPresentation to overwrite the property with
*
* @return the AdminPresentation being mapped to the attribute
*/
AdminPresentationAdornedTargetCollection value();
} | 0true
| common_src_main_java_org_broadleafcommerce_common_presentation_override_AdminPresentationAdornedTargetCollectionOverride.java |
1,993 | private static class AndMatcher<T> extends AbstractMatcher<T> implements Serializable {
private final Matcher<? super T> a, b;
public AndMatcher(Matcher<? super T> a, Matcher<? super T> b) {
this.a = a;
this.b = b;
}
public boolean matches(T t) {
return a.matches(t) && b.matches(t);
}
@Override
public boolean equals(Object other) {
return other instanceof AndMatcher
&& ((AndMatcher) other).a.equals(a)
&& ((AndMatcher) other).b.equals(b);
}
@Override
public int hashCode() {
return 41 * (a.hashCode() ^ b.hashCode());
}
@Override
public String toString() {
return "and(" + a + ", " + b + ")";
}
private static final long serialVersionUID = 0;
} | 0true
| src_main_java_org_elasticsearch_common_inject_matcher_AbstractMatcher.java |
722 | @RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ListTest extends HazelcastTestSupport {
@Test
@ClientCompatibleTest
public void testListMethods() throws Exception {
Config config = new Config();
final String name = "defList";
final int count = 100;
final int insCount = 2;
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(insCount);
final HazelcastInstance[] instances = factory.newInstances(config);
for (int i=0; i<count; i++){
assertTrue(getList(instances, name).add("item"+i));
}
// Iterator iter = getList(instances, name).iterator();
// int item = 0;
// while (iter.hasNext()){
// assertEquals("item"+item++, iter.next());
// }
// assertEquals(count, getList(instances, name).size());
assertEquals("item0", getList(instances, name).get(0));
assertEquals(count, getList(instances, name).size());
getList(instances, name).add(0, "item");
assertEquals(count+1, getList(instances, name).size());
assertEquals("item", getList(instances, name).get(0));
assertEquals("item0", getList(instances, name).get(1));
assertTrue(getList(instances, name).remove("item99"));
assertFalse(getList(instances, name).remove("item99"));
assertEquals(count, getList(instances, name).size());
assertEquals("item",getList(instances, name).set(0, "newItem"));
assertEquals("newItem",getList(instances, name).get(0));
getList(instances, name).clear();
assertEquals(0, getList(instances, name).size());
List list = new ArrayList();
list.add("item-1");
list.add("item-2");
assertTrue(getList(instances, name).addAll(list));
assertEquals("item-1", getList(instances, name).get(0));
assertEquals("item-2", getList(instances, name).get(1));
assertTrue(getList(instances, name).addAll(1,list));
assertEquals("item-1", getList(instances, name).get(0));
assertEquals("item-1", getList(instances, name).get(1));
assertEquals("item-2", getList(instances, name).get(2));
assertEquals("item-2", getList(instances, name).get(3));
assertEquals(4, getList(instances, name).size());
assertEquals(0, getList(instances, name).indexOf("item-1"));
assertEquals(1, getList(instances, name).lastIndexOf("item-1"));
assertEquals(2, getList(instances, name).indexOf("item-2"));
assertEquals(3, getList(instances, name).lastIndexOf("item-2"));
assertEquals(4, getList(instances, name).size());
assertTrue(getList(instances, name).containsAll(list));
list.add("asd");
assertFalse(getList(instances, name).containsAll(list));
assertTrue(getList(instances, name).contains("item-1"));
assertFalse(getList(instances, name).contains("item"));
list = getList(instances, name).subList(1, 3);
assertEquals(2, list.size());
assertEquals("item-1", list.get(0));
assertEquals("item-2", list.get(1));
final ListIterator listIterator = getList(instances, name).listIterator(1);
assertTrue(listIterator.hasPrevious());
assertEquals("item-1", listIterator.next());
assertEquals("item-2", listIterator.next());
assertEquals("item-2", listIterator.next());
assertFalse(listIterator.hasNext());
list = new ArrayList();
list.add("item1");
list.add("item2");
assertFalse(getList(instances, name).removeAll(list));
assertEquals(4, getList(instances, name).size());
list.add("item-1");
assertTrue(getList(instances, name).removeAll(list));
assertEquals(2, getList(instances, name).size());
list.clear();
list.add("item-2");
assertFalse(getList(instances, name).retainAll(list));
assertEquals(2, getList(instances, name).size());
list.set(0, "item");
assertTrue(getList(instances, name).add("item"));
assertTrue(getList(instances, name).retainAll(list));
assertEquals(1, getList(instances, name).size());
assertEquals("item", getList(instances, name).get(0));
}
@Test
public void testListener() throws Exception {
Config config = new Config();
final String name = "defList";
final int count = 10;
final int insCount = 4;
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(insCount);
final HazelcastInstance[] instances = factory.newInstances(config);
final CountDownLatch latchAdd = new CountDownLatch(count);
final CountDownLatch latchRemove = new CountDownLatch(count);
ItemListener listener = new ItemListener() {
public void itemAdded(ItemEvent item) {
latchAdd.countDown();
}
public void itemRemoved(ItemEvent item) {
latchRemove.countDown();
}
};
getList(instances, name).addItemListener(listener, true);
for (int i = 0; i < count; i++) {
getList(instances, name).add("item" + i);
}
for (int i = 0; i < count; i++) {
getList(instances, name).remove("item"+i);
}
assertTrue(latchAdd.await(5, TimeUnit.SECONDS));
assertTrue(latchRemove.await(5, TimeUnit.SECONDS));
}
@Test
public void testAddRemoveList(){
Config config = new Config();
final String name = "defList";
final int insCount = 2;
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(insCount);
final HazelcastInstance[] instances = factory.newInstances(config);
TransactionContext context = instances[0].newTransactionContext();
assertTrue(instances[1].getList(name).add("value1"));
try {
context.beginTransaction();
TransactionalList l = context.getList(name);
assertEquals(1, l.size());
assertTrue(l.add("value1"));
assertEquals(2, l.size());
assertFalse(l.remove("value2"));
assertEquals(2, l.size());
assertTrue(l.remove("value1"));
assertEquals(1, l.size());
context.commitTransaction();
} catch (Exception e){
fail(e.getMessage());
context.rollbackTransaction();
}
assertEquals(1, instances[1].getList(name).size());
}
@Test
public void testMigration(){
Config config = new Config();
final String name = "defList";
config.addListConfig(new ListConfig().setName(name).setBackupCount(1));
final int insCount = 4;
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(insCount);
HazelcastInstance instance1 = factory.newHazelcastInstance(config);
IList list = instance1.getList(name);
for (int i=0; i<100; i++){
list.add("item"+i);
}
HazelcastInstance instance2 = factory.newHazelcastInstance(config);
assertEquals(100, instance2.getList(name).size());
HazelcastInstance instance3 = factory.newHazelcastInstance(config);
assertEquals(100, instance3.getList(name).size());
instance1.shutdown();
assertEquals(100, instance3.getList(name).size());
list = instance2.getList(name);
for (int i=0; i<100; i++){
list.add("item-"+i);
}
instance2.shutdown();
assertEquals(200, instance3.getList(name).size());
instance1 = factory.newHazelcastInstance(config);
assertEquals(200, instance1.getList(name).size());
instance3.shutdown();
assertEquals(200, instance1.getList(name).size());
}
@Test
public void testMaxSize(){
Config config = new Config();
final String name = "defList";
config.addListConfig(new ListConfig().setName(name).setBackupCount(1).setMaxSize(100));
final int insCount = 2;
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(insCount);
HazelcastInstance instance1 = factory.newHazelcastInstance(config);
HazelcastInstance instance2 = factory.newHazelcastInstance(config);
IList list = instance1.getList(name);
for (int i=0; i<100; i++){
assertTrue(list.add("item"+i));
}
assertFalse(list.add("item"));
assertNotNull(list.remove(0));
assertTrue(list.add("item"));
}
private IList getList(HazelcastInstance[] instances, String name){
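        // grab the proxy from a randomly chosen member so successive calls hit different nodes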
final Random rnd = new Random();
return instances[rnd.nextInt(instances.length)].getList(name);
}
} | 0true
| hazelcast_src_test_java_com_hazelcast_collection_ListTest.java |
3,390 | abstract class SortedSetDVAtomicFieldData {
private final AtomicReader reader;
private final String field;
private volatile IntArray hashes;
SortedSetDVAtomicFieldData(AtomicReader reader, String field) {
this.reader = reader;
this.field = field;
}
public boolean isMultiValued() {
// we could compute it when loading the values for the first time and then cache it but it would defeat the point of
// doc values which is to make loading faster
return true;
}
public int getNumDocs() {
return reader.maxDoc();
}
public long getNumberUniqueValues() {
final SortedSetDocValues values = getValuesNoException(reader, field);
return values.getValueCount();
}
public long getMemorySizeInBytes() {
// There is no API to access memory usage per-field and RamUsageEstimator can't help since there are often references
// from a per-field instance to all other instances handled by the same format
return -1L;
}
public void close() {
// no-op
}
public org.elasticsearch.index.fielddata.BytesValues.WithOrdinals getBytesValues(boolean needsHashes) {
final SortedSetDocValues values = getValuesNoException(reader, field);
return new SortedSetValues(reader, field, values);
}
public org.elasticsearch.index.fielddata.BytesValues.WithOrdinals getHashedBytesValues() {
final SortedSetDocValues values = getValuesNoException(reader, field);
if (hashes == null) {
synchronized (this) {
if (hashes == null) {
final long valueCount = values.getValueCount();
final IntArray hashes = BigArrays.newIntArray(1L + valueCount);
BytesRef scratch = new BytesRef(16);
hashes.set(0, scratch.hashCode());
for (long i = 0; i < valueCount; ++i) {
values.lookupOrd(i, scratch);
hashes.set(1L + i, scratch.hashCode());
}
this.hashes = hashes;
}
}
}
return new SortedSetHashedValues(reader, field, values, hashes);
}
private static SortedSetDocValues getValuesNoException(AtomicReader reader, String field) {
try {
SortedSetDocValues values = reader.getSortedSetDocValues(field);
if (values == null) {
// This field has not been populated
assert reader.getFieldInfos().fieldInfo(field) == null;
values = SortedSetDocValues.EMPTY;
}
return values;
} catch (IOException e) {
throw new ElasticsearchIllegalStateException("Couldn't load doc values", e);
}
}
static class SortedSetValues extends BytesValues.WithOrdinals {
protected final SortedSetDocValues values;
SortedSetValues(AtomicReader reader, String field, SortedSetDocValues values) {
super(new SortedSetDocs(new SortedSetOrdinals(reader, field, values.getValueCount()), values));
this.values = values;
}
@Override
public BytesRef getValueByOrd(long ord) {
assert ord != Ordinals.MISSING_ORDINAL;
values.lookupOrd(ord - 1, scratch);
return scratch;
}
@Override
public BytesRef nextValue() {
values.lookupOrd(ordinals.nextOrd()-1, scratch);
return scratch;
}
}
static final class SortedSetHashedValues extends SortedSetValues {
private final IntArray hashes;
SortedSetHashedValues(AtomicReader reader, String field, SortedSetDocValues values, IntArray hashes) {
super(reader, field, values);
this.hashes = hashes;
}
@Override
public int currentValueHash() {
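            // hashes slot 0 holds the hash of the empty/missing value; slot 1 + luceneOrd holds the term hash, matching the shifted ordinals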
assert ordinals.currentOrd() >= 0;
return hashes.get(ordinals.currentOrd());
}
}
static final class SortedSetOrdinals implements Ordinals {
// We don't store SortedSetDocValues as a member because Ordinals must be thread-safe
private final AtomicReader reader;
private final String field;
private final long numOrds;
public SortedSetOrdinals(AtomicReader reader, String field, long numOrds) {
super();
this.reader = reader;
this.field = field;
this.numOrds = numOrds;
}
@Override
public long getMemorySizeInBytes() {
// Ordinals can't be distinguished from the atomic field data instance
return -1;
}
@Override
public boolean isMultiValued() {
return true;
}
@Override
public int getNumDocs() {
return reader.maxDoc();
}
@Override
public long getNumOrds() {
return numOrds;
}
@Override
public long getMaxOrd() {
return 1 + numOrds;
}
@Override
public Docs ordinals() {
final SortedSetDocValues values = getValuesNoException(reader, field);
assert values.getValueCount() == numOrds;
return new SortedSetDocs(this, values);
}
}
static class SortedSetDocs implements Ordinals.Docs {
private final SortedSetOrdinals ordinals;
private final SortedSetDocValues values;
private final LongsRef longScratch;
private int ordIndex = Integer.MAX_VALUE;
private long currentOrdinal = -1;
SortedSetDocs(SortedSetOrdinals ordinals, SortedSetDocValues values) {
this.ordinals = ordinals;
this.values = values;
longScratch = new LongsRef(8);
}
@Override
public Ordinals ordinals() {
return ordinals;
}
@Override
public int getNumDocs() {
return ordinals.getNumDocs();
}
@Override
public long getNumOrds() {
return ordinals.getNumOrds();
}
@Override
public long getMaxOrd() {
return ordinals.getMaxOrd();
}
@Override
public boolean isMultiValued() {
return ordinals.isMultiValued();
}
@Override
public long getOrd(int docId) {
values.setDocument(docId);
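            // ordinals are shifted by 1 so that 0 can act as the "missing" ordinal for docs without a value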
return currentOrdinal = 1 + values.nextOrd();
}
@Override
public LongsRef getOrds(int docId) {
values.setDocument(docId);
longScratch.offset = 0;
longScratch.length = 0;
for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
longScratch.longs = ArrayUtil.grow(longScratch.longs, longScratch.length + 1);
longScratch.longs[longScratch.length++] = 1 + ord;
}
return longScratch;
}
@Override
public long nextOrd() {
assert ordIndex < longScratch.length;
return currentOrdinal = longScratch.longs[ordIndex++];
}
@Override
public int setDocument(int docId) {
// For now, we consume all ords and pass them to the iter instead of doing it in a streaming way because Lucene's
// SORTED_SET doc values are cached per thread, you can't have a fully independent instance
final LongsRef ords = getOrds(docId);
ordIndex = 0;
return ords.length;
}
@Override
public long currentOrd() {
return currentOrdinal;
}
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_plain_SortedSetDVAtomicFieldData.java |
595 | public class PlotConstants {
/*
* Default Plot Properties.
*/
public static final int DEFAULT_NUMBER_OF_SUBPLOTS = 1;
public static final boolean LOCAL_CONTROLS_ENABLED_BY_DEFAULT = true;
public static final YAxisMaximumLocationSetting DEFAULT_Y_AXIS_MAX_LOCATION_SETTING = YAxisMaximumLocationSetting.MAXIMUM_AT_TOP;
public static final NonTimeAxisSubsequentBoundsSetting DEFAULT_NON_TIME_AXIS_MIN_SUBSEQUENT_SETTING = NonTimeAxisSubsequentBoundsSetting.SEMI_FIXED;
public static final NonTimeAxisSubsequentBoundsSetting DEFAULT_NON_TIME_AXIS_MAX_SUBSEQUENT_SETTING = NonTimeAxisSubsequentBoundsSetting.SEMI_FIXED;
public static final int MILLISECONDS_IN_SECOND = 1000;
public static final int MILLISECONDS_IN_MIN = MILLISECONDS_IN_SECOND * 60;
public static final int MILLISECONDS_IN_HOUR = MILLISECONDS_IN_MIN * 60;
public static final int MILLISECONDS_IN_DAY = MILLISECONDS_IN_HOUR * 24;
public static final int DEFAUlT_PLOT_SPAN = 30 * 60 * 1000; // 30 mins in Milliseconds
public static final Color ROLL_OVER_PLOT_LINE_COLOR = Color.white;
public static final int DEFAULT_TIME_AXIS_FONT_SIZE = 10;
public static final Font DEFAULT_TIME_AXIS_FONT = new Font("Arial", Font.PLAIN, DEFAULT_TIME_AXIS_FONT_SIZE);
public static final int DEFAULT_PLOTLINE_THICKNESS = 1;
public static final int SELECTED_LINE_THICKNESS = 2;
public static final Color DEFAULT_PLOT_FRAME_BACKGROUND_COLOR = new Color(51, 51, 51);
public static final Color DEFAULT_PLOT_AREA_BACKGROUND_COLOR = Color.black;
public static final int DEFAULT_TIME_AXIS_INTERCEPT = 0;
public static final Color DEFAULT_TIME_AXIS_COLOR = Color.white;
public static final Color DEFAULT_TIME_AXIS_LABEL_COLOR = Color.white;
public static final Color DEFAULT_NON_TIME_AXIS_COLOR= Color.white;
public static final Color DEFAULT_GRID_LINE_COLOR = Color.LIGHT_GRAY;
public static final int DEFAULT_MIN_SAMPLES_FOR_AUTO_SCALE = 0;
public static final double DEFAULT_TIME_AXIS_PADDING = 0.25;
public static final double DEFAULT_TIME_AXIS_PADDING_JUMP_MIN = 0.05;
public static final double DEFAULT_TIME_AXIS_PADDING_JUMP_MAX = 0.25;
public static final double DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MIN = 0.20;
public static final double DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MAX = 0.25;
public static final double DEFAULT_NON_TIME_AXIS_PADDING_MAX = 0.05;
public static final double DEFAULT_NON_TIME_AXIS_PADDING_MIN = 0.05;
public static final double DEFAULT_NON_TIME_AXIS_MIN_VALUE = 0;
public static final double DEFAULT_NON_TIME_AXIS_MAX_VALUE = 1;
public static final long DEFAULT_TIME_AXIS_MIN_VALUE = new GregorianCalendar().getTimeInMillis();
public static final long DEFAULT_TIME_AXIS_MAX_VALUE= DEFAULT_TIME_AXIS_MIN_VALUE + DEFAUlT_PLOT_SPAN;
public static final int MAX_NUMBER_OF_DATA_ITEMS_ON_A_PLOT = 30;
public static final int MAX_NUMBER_SUBPLOTS = 10;
public static final PlotLineDrawingFlags DEFAULT_PLOT_LINE_DRAW = new PlotLineDrawingFlags(true, false);
public static final int MAJOR_TICK_MARK_LENGTH = 3;
public static final int MINOR_TICK_MARK_LENGTH = 1;
public static final String GMT = "GMT";
public static final String DEFAULT_TIME_ZONE = GMT;
public static final String DEFAULT_TIME_AXIS_DATA_FORMAT = "DDD/HH:mm:ss"; // add a z to see the time zone.
// Field names for persistence
public static final String TIME_AXIS_SETTING = "PlotTimeAxisSetting";
public static final String X_AXIS_MAXIMUM_LOCATION_SETTING = "PlotXAxisMaximumLocation";
public static final String Y_AXIS_MAXIMUM_LOCATION_SETTING = "PlotYAxisMaximumLocation";
public static final String TIME_AXIS_SUBSEQUENT_SETTING = "PlotTimeAxisSubsequentSetting";
public static final String NON_TIME_AXIS_SUBSEQUENT_MIN_SETTING = "PlotNonTimeAxisSubsequentMinSetting";
public static final String NON_TIME_AXIS_SUBSEQUENT_MAX_SETTING = "PlotNonTimeAxisSubsequentMaxSetting";
public static final String NON_TIME_MAX = "NonTimeMax";
public static final String NON_TIME_MIN = "NonTimeMin";
public static final String TIME_MAX = "TimeMax";
public static final String TIME_MIN = "TimeMin";
public static final String TIME_PADDING = "TimePadding";
public static final String NON_TIME_MIN_PADDING = "NonTimeMinPadding";
public static final String NON_TIME_MAX_PADDING = "NonTimeMaxPadding";
public static final String GROUP_BY_ORDINAL_POSITION = "GroupByOrdinalPosition";
public static final String PIN_TIME_AXIS = "PinTimeAxis";
public static final String DRAW_LINES = "PlotLineDrawLines";
public static final String DRAW_MARKERS = "PlotLineDrawMarkers";
public static final String DRAW_CHARACTERS = "PlotLineDrawCharacters";
public static final String CONNECTION_TYPE = "PlotLineConnectionType";
public static final String COLOR_ASSIGNMENTS = "PlotColorAssignments";
public static final String LINE_SETTINGS = "PlotLineSettings";
// Delay before firing a request for data at a higher resolution on a window.
public final static int RESIZE_TIMER = 200; // in milliseconds.
// Limit button border settings
public static final int ARROW_BUTTON_BORDER_STYLE_TOP = 1;
public static final int ARROW_BUTTON_BORDER_STYLE_LEFT = 0;
public static final int ARROW_BUTTON_BORDER_STYLE_BOTTOM = 0;
public static final int ARROW_BUTTON_BORDER_STYLE_RIGHT = 0;
// The size below which the plot will not go before it starts to truncate the legends.
public static final int MINIMUM_PLOT_WIDTH = 200; //200;
public static final int MINIMUM_PLOT_HEIGHT = 100;
public static final int Y_AXIS_WHEN_NON_TIME_LABEL_WIDTH = 28;
// Legends
public final static Color LEGEND_BACKGROUND_COLOR = DEFAULT_PLOT_FRAME_BACKGROUND_COLOR;
public static final int PLOT_LEGEND_BUFFER = 5;
public static final int PLOT_LEGEND_WIDTH = 120;
public static final int PLOT_MINIMUM_LEGEND_WIDTH = 40;
public static final int PLOT_LEGEND_OFFSET_FROM_LEFT_HAND_SIDE = 0;
public static final String LEGEND_NEWLINE_CHARACTER = "\n";
public static final String LEGEND_ELLIPSES = "...";
public static final int MAXIMUM_LEGEND_TEXT_SIZE = 20; //maximum width of a legend
public static final DecimalFormat DECIMAL_FORMAT = new DecimalFormat("#0.000");
// Sync line
public static final Color TIME_SYNC_LINE_COLOR = Color.orange;
public static final int TIME_SYNC_LINE_WIDTH = 2;
public static final int SYNC_LINE_STYLE = 9; // ChartConstants.LS_DASH_DOT;
public static final int SHIFT_KEY_MASK = InputEvent.SHIFT_MASK;
public static final int ALT_KEY_MASK = InputEvent.ALT_MASK;
public static final int CTL_KEY_MASK = InputEvent.CTRL_MASK;
// Data Cursor
public static final Color DATA_CURSOR_COLOR = new Color(235, 235, 235);//new Color(51, 102, 153);
public static final int SLOPE_LINE_STYLE = 0; // ChartConstants.LS_SOLID;
public static final int SLOPE_LINE_WIDTH = 1;
public static final String SLOPE_UNIT = "/min";
public static final String REGRESSION_LINE = "RegressionLine";
public static final int NUMBER_REGRESSION_POINTS = 20;
public static final int SLOPE_UNIT_DIVIDER_IN_MS = PlotConstants.MILLISECONDS_IN_MIN; // per second.
public final static float dash1[] = {10.0f};
// Data Compression
// Sets the default value for data compression which can be overridden by the client.
public static final boolean COMPRESSION_ENABLED_BY_DEFAULT = true;
public static final int MAXIMUM_PLOT_DATA_BUFFER_SLIZE_REQUEST_SIZE = 12 * MILLISECONDS_IN_HOUR ;
// Panning and zooming controls
public static final double PANNING_NON_TIME_AXIS_PERCENTAGE = 25;
public static final double PANNING_TIME_AXIS_PERCENTAGE = 25;
public static final double ZOOMING_NON_TIME_AXIS_PERCENTAGE = 10;
public static final double ZOOMING_TIME_AXIS_PERCENTAGE = 10;
public static final int zoomingTimeAxisIncrementInMiliseconds = 30 * MILLISECONDS_IN_SECOND;
public static final int zoomingNonTimeAxisIncrement = 10;
public static final int LOCAL_CONTROL_HEIGHT = 25;
public static final int LOCAL_CONTROL_WIDTH = 28;
/**
* Orientation of the time axis.
*/
public enum AxisOrientationSetting {
X_AXIS_AS_TIME, Y_AXIS_AS_TIME
}
public enum AxisBounds {
MAX, MIN
}
public enum XAxisMaximumLocationSetting {
MAXIMUM_AT_RIGHT, MAXIMUM_AT_LEFT
}
public enum YAxisMaximumLocationSetting {
MAXIMUM_AT_TOP, MAXIMUM_AT_BOTTOM
}
/**
* Subsequent modes on the time axis.
*/
public enum TimeAxisSubsequentBoundsSetting {
JUMP, SCRUNCH
}
/**
* Subsequent modes on the non-time axis
*/
public enum NonTimeAxisSubsequentBoundsSetting {
AUTO, FIXED, SEMI_FIXED
}
/**
* State that limit alarms can be in.
*/
public enum LimitAlarmState{
NO_ALARM, ALARM_RAISED, ALARM_OPENED_BY_USER, ALARM_CLOSED_BY_USER
}
/**
* Panning actions
*/
public enum PanDirection {
PAN_LOWER_X_AXIS, PAN_HIGHER_X_AXIS, PAN_LOWER_Y_AXIS, PAN_HIGHER_Y_AXIS;
}
/**
* Zoom actions
*/
public enum ZoomDirection {
ZOOM_IN_HIGH_Y_AXIS, ZOOM_OUT_HIGH_Y_AXIS,
ZOOM_IN_CENTER_Y_AXIS, ZOOM_OUT_CENTER_Y_AXIS,
ZOOM_IN_LOW_Y_AXIS, ZOOM_OUT_LOW_Y_AXIS,
ZOOM_IN_LEFT_X_AXIS, ZOOM_OUT_LEFT_X_AXIS,
ZOOM_IN_CENTER_X_AXIS, ZOOM_OUT_CENTER_X_AXIS,
ZOOM_IN_RIGHT_X_AXIS, ZOOM_OUT_RIGHT_X_AXIS;
}
public enum AxisType {
TIME_IN_JUMP_MODE (DEFAULT_TIME_AXIS_PADDING_JUMP_MIN,
DEFAULT_TIME_AXIS_PADDING_JUMP_MAX),
TIME_IN_SCRUNCH_MODE (DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MIN,
DEFAULT_TIME_AXIS_PADDING_SCRUNCH_MAX),
NON_TIME (DEFAULT_NON_TIME_AXIS_PADDING_MIN,
DEFAULT_NON_TIME_AXIS_PADDING_MAX);
private final double minimumDefaultPadding;
private final double maximumDefaultPadding;
AxisType(double minPadding, double maxPadding) {
this.minimumDefaultPadding = minPadding;
this.maximumDefaultPadding = maxPadding;
}
public double getMinimumDefaultPadding() {
return minimumDefaultPadding;
}
public String getMinimumDefaultPaddingAsText() {
String percentString = NumberFormat.getPercentInstance().format(this.minimumDefaultPadding);
return percentString.substring(0, percentString.length()-1);
}
public double getMaximumDefaultPadding() {
return maximumDefaultPadding;
}
public String getMaximumDefaultPaddingAsText() {
String percentString = NumberFormat.getPercentInstance().format(this.maximumDefaultPadding);
return percentString.substring(0, percentString.length()-1);
}
}
/**
* DISPLAY_ONLY optimizes the plot buffering for displaying multiple plots with the minimum buffer wait.
* Switching to USER_INTERACTION mode deepens and widens the plot buffer to support user interactions such
* as panning and zooming.
*/
public enum PlotDisplayState {
DISPLAY_ONLY, USER_INTERACTION;
}
/**
* Indicates whether we will be drawing plot lines, point markers, or both.
*/
public static class PlotLineDrawingFlags {
private boolean line, markers;
public PlotLineDrawingFlags(boolean line, boolean markers) {
this.line = line;
this.markers = markers;
}
public boolean drawLine() {
return line;
}
public boolean drawMarkers() {
return markers;
}
}
/**
* Indicates how to connect plot point with lines.
*/
public enum PlotLineConnectionType {
DIRECT, STEP_X_THEN_Y
}
/**
* Params for Labeling Algorithm
*/
/**
* The regular expression defining the delimiter pattern between words.
* Words are delimited by a sequence of one or more spaces or underscores.
*/
public static final String WORD_DELIMITERS = "[ _]+";
/**
* The compiled regular expression defining the delimiter pattern between
* words.
*/
public static final Pattern WORD_DELIMITER_PATTERN = Pattern.compile(WORD_DELIMITERS);
/**
* The separator to use when concatenating words together to form labels.
*/
public static final String WORD_SEPARATOR = " ";
/**
* The maximum thickness for a plot line's stroke
*/
public static final int MAX_LINE_THICKNESS = 5;
} | 1no label
| fastPlotViews_src_main_java_gov_nasa_arc_mct_fastplot_bridge_PlotConstants.java |
403 | public class TransportCreateSnapshotAction extends TransportMasterNodeOperationAction<CreateSnapshotRequest, CreateSnapshotResponse> {
private final SnapshotsService snapshotsService;
@Inject
public TransportCreateSnapshotAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, SnapshotsService snapshotsService) {
super(settings, transportService, clusterService, threadPool);
this.snapshotsService = snapshotsService;
}
@Override
protected String executor() {
return ThreadPool.Names.SNAPSHOT;
}
@Override
protected String transportAction() {
return CreateSnapshotAction.NAME;
}
@Override
protected CreateSnapshotRequest newRequest() {
return new CreateSnapshotRequest();
}
@Override
protected CreateSnapshotResponse newResponse() {
return new CreateSnapshotResponse();
}
@Override
protected ClusterBlockException checkBlock(CreateSnapshotRequest request, ClusterState state) {
return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, "");
}
@Override
protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) throws ElasticsearchException {
SnapshotsService.SnapshotRequest snapshotRequest =
new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository())
.indices(request.indices())
.indicesOptions(request.indicesOptions())
.partial(request.partial())
.settings(request.settings())
.includeGlobalState(request.includeGlobalState())
.masterNodeTimeout(request.masterNodeTimeout());
snapshotsService.createSnapshot(snapshotRequest, new SnapshotsService.CreateSnapshotListener() {
@Override
public void onResponse() {
if (request.waitForCompletion()) {
snapshotsService.addListener(new SnapshotsService.SnapshotCompletionListener() {
SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
@Override
public void onSnapshotCompletion(SnapshotId snapshotId, SnapshotInfo snapshot) {
if (this.snapshotId.equals(snapshotId)) {
listener.onResponse(new CreateSnapshotResponse(snapshot));
snapshotsService.removeListener(this);
}
}
@Override
public void onSnapshotFailure(SnapshotId snapshotId, Throwable t) {
if (this.snapshotId.equals(snapshotId)) {
listener.onFailure(t);
snapshotsService.removeListener(this);
}
}
});
} else {
listener.onResponse(new CreateSnapshotResponse());
}
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
}
});
}
} | 1no label
| src_main_java_org_elasticsearch_action_admin_cluster_snapshots_create_TransportCreateSnapshotAction.java |
355 | future.andThen(new ExecutionCallback<Map<String, List<Integer>>>() {
@Override
public void onResponse(Map<String, List<Integer>> response) {
listenerResults.putAll(response);
semaphore.release();
}
@Override
public void onFailure(Throwable t) {
semaphore.release();
}
}); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java |
3,283 | private static class OrdinalsStore {
private static final int PAGE_SIZE = 1 << 12;
/**
* Number of slots at <code>level</code>
*/
private static int numSlots(int level) {
return 1 << level;
}
private static int slotsMask(int level) {
return numSlots(level) - 1;
}
/**
* Encode the position for the given level and offset. The idea is to encode the level using unary coding in the lower bits and
* then the offset in the higher bits.
*/
private static long position(int level, long offset) {
assert level >= 1;
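            // e.g. position(2, 5) = 0b10110: the single low set bit encodes level 2 (unary), the high bits encode offset 5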
return (1 << (level - 1)) | (offset << level);
}
/**
* Decode the level from an encoded position.
*/
private static int level(long position) {
return 1 + Long.numberOfTrailingZeros(position);
}
/**
* Decode the offset from the position.
*/
private static long offset(long position, int level) {
return position >>> level;
}
/**
* Get the ID of the slice given an offset.
*/
private static long sliceID(int level, long offset) {
return offset >>> level;
}
/**
* Compute the first offset of the given slice.
*/
private static long startOffset(int level, long slice) {
return slice << level;
}
/**
* Compute the number of ordinals stored for a value given its current position.
*/
private static int numOrdinals(int level, long offset) {
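            // 1 ordinal at level 0 + (2^level - 2) in the full slices of levels 1..level-1 + ((offset & mask) + 1) in the current slice = (1 << level) + (offset & mask)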
return (1 << level) + (int) (offset & slotsMask(level));
}
// Current position
private PagedGrowableWriter positions;
// First level (0) of ordinals and pointers to the next level
private final GrowableWriter firstOrdinals;
private PagedGrowableWriter firstNextLevelSlices;
// Ordinals and pointers for other levels, starting at 1
private final PagedGrowableWriter[] ordinals;
private final PagedGrowableWriter[] nextLevelSlices;
private final int[] sizes;
private final int startBitsPerValue;
private final float acceptableOverheadRatio;
OrdinalsStore(int maxDoc, int startBitsPerValue, float acceptableOverheadRatio) {
this.startBitsPerValue = startBitsPerValue;
this.acceptableOverheadRatio = acceptableOverheadRatio;
positions = new PagedGrowableWriter(maxDoc, PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
firstOrdinals = new GrowableWriter(startBitsPerValue, maxDoc, acceptableOverheadRatio);
// over allocate in order to never worry about the array sizes, 24 entries would allow to store several millions of ordinals per doc...
ordinals = new PagedGrowableWriter[24];
nextLevelSlices = new PagedGrowableWriter[24];
sizes = new int[24];
Arrays.fill(sizes, 1); // reserve the 1st slice on every level
}
/**
* Allocate a new slice and return its ID.
*/
private long newSlice(int level) {
final long newSlice = sizes[level]++;
// Lazily allocate ordinals
if (ordinals[level] == null) {
ordinals[level] = new PagedGrowableWriter(8L * numSlots(level), PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio);
} else {
ordinals[level] = ordinals[level].grow(sizes[level] * numSlots(level));
if (nextLevelSlices[level] != null) {
nextLevelSlices[level] = nextLevelSlices[level].grow(sizes[level]);
}
}
return newSlice;
}
public int addOrdinal(int docID, long ordinal) {
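            // returns the total number of ordinals stored for this document, including the one just added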
final long position = positions.get(docID);
if (position == 0L) { // on the first level
// 0 or 1 ordinal
if (firstOrdinals.get(docID) == 0L) {
firstOrdinals.set(docID, ordinal);
return 1;
} else {
final long newSlice = newSlice(1);
if (firstNextLevelSlices == null) {
firstNextLevelSlices = new PagedGrowableWriter(firstOrdinals.size(), PAGE_SIZE, 3, acceptableOverheadRatio);
}
firstNextLevelSlices.set(docID, newSlice);
final long offset = startOffset(1, newSlice);
ordinals[1].set(offset, ordinal);
positions.set(docID, position(1, offset)); // current position is on the 1st level and not allocated yet
return 2;
}
} else {
int level = level(position);
long offset = offset(position, level);
assert offset != 0L;
if (((offset + 1) & slotsMask(level)) == 0L) {
// reached the end of the slice, allocate a new one on the next level
final long newSlice = newSlice(level + 1);
if (nextLevelSlices[level] == null) {
nextLevelSlices[level] = new PagedGrowableWriter(sizes[level], PAGE_SIZE, 1, acceptableOverheadRatio);
}
nextLevelSlices[level].set(sliceID(level, offset), newSlice);
++level;
offset = startOffset(level, newSlice);
assert (offset & slotsMask(level)) == 0L;
} else {
// just go to the next slot
++offset;
}
ordinals[level].set(offset, ordinal);
final long newPosition = position(level, offset);
positions.set(docID, newPosition);
return numOrdinals(level, offset);
}
}
public void appendOrdinals(int docID, LongsRef ords) {
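            // appends every ordinal stored for docID to ords: the single level-0 ordinal first, then the chained slices of the higher levels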
// First level
final long firstOrd = firstOrdinals.get(docID);
if (firstOrd == 0L) {
return;
}
ords.longs = ArrayUtil.grow(ords.longs, ords.offset + ords.length + 1);
ords.longs[ords.offset + ords.length++] = firstOrd;
if (firstNextLevelSlices == null) {
return;
}
long sliceID = firstNextLevelSlices.get(docID);
if (sliceID == 0L) {
return;
}
// Other levels
for (int level = 1; ; ++level) {
final int numSlots = numSlots(level);
ords.longs = ArrayUtil.grow(ords.longs, ords.offset + ords.length + numSlots);
final long offset = startOffset(level, sliceID);
for (int j = 0; j < numSlots; ++j) {
final long ord = ordinals[level].get(offset + j);
if (ord == 0L) {
return;
}
ords.longs[ords.offset + ords.length++] = ord;
}
if (nextLevelSlices[level] == null) {
return;
}
sliceID = nextLevelSlices[level].get(sliceID);
if (sliceID == 0L) {
return;
}
}
}
} | 0true
| src_main_java_org_elasticsearch_index_fielddata_ordinals_OrdinalsBuilder.java |
3,596 | private ThreadLocal<NumericTokenStream> tokenStream = new ThreadLocal<NumericTokenStream>() {
@Override
protected NumericTokenStream initialValue() {
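            // one reusable stream per thread: token streams are stateful and must not be shared across threads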
return new NumericTokenStream(precisionStep);
}
}; | 0true
| src_main_java_org_elasticsearch_index_mapper_core_NumberFieldMapper.java |
710 | public interface ProductOption extends Serializable {
/**
* Returns unique identifier of the product option.
* @return
*/
public Long getId();
/**
* Sets the unique identifier of the product option.
* @param id
*/
public void setId(Long id);
/**
* Returns the option type. For example, "color", "size", etc.
* These are used primarily to determine how the UI should prompt for and
* validate the product option.
*
* @return
*/
public ProductOptionType getType();
/**
* Sets the option type. This is primarily used for
* display to render the option selection.
*
* @param type
*/
public void setType(ProductOptionType type);
/**
* Gets the attribute name for where the ProductOptionValue selected for
* this ProductOption is stored in the OrderItemAttributes for the
* OrderItem
*
* @return the name of the OrderItemAttribute to store the selected
* ProductOptionValue in the Order domain
* @see {@link OrderItemAttribute}, {@link OrderItem}
*/
public String getAttributeName();
/**
* Sets the attribute name that will be used in storing the selected
* ProductOptionValue for this ProductOption
*
* @param name - the name of the OrderItemAttribute to store the selected
* ProductOptionValue in the Order domain
*/
public void setAttributeName(String name);
/**
* The label to show to the user when selecting from the available
* {@link ProductOptionValue}s. This might be "Color" or "Size"
*
* @return
*/
public String getLabel();
/**
* Sets the label to show the user when selecting from the available
* {@link ProductOptionValue}s
*
* @param label
*/
public void setLabel(String label);
/**
*
* @return whether or not this ProductOption is required
*/
public Boolean getRequired();
/**
     * Sets whether or not this ProductOption is required
* @param required
*/
public void setRequired(Boolean required);
/**
* Gets the display order of this option in relation to the other {@link ProductOption}s
*
* @return
*/
public Integer getDisplayOrder();
/**
     * Sets the display order of this option in relation to the other {@link ProductOption}s
*
* @param displayOrder
*/
public void setDisplayOrder(Integer displayOrder);
/**
* Gets all the Products associated with this ProductOption
*
* @return the Products associated with this ProductOption
*/
public List<Product> getProducts();
/**
* Set the Products to associate with this ProductOption
*
* @param products
*/
public void setProducts(List<Product> products);
/**
* Gets the available values that a user can select for this ProductOption.
* This value will be stored in OrderItemAttributes at the OrderItem level. The
* OrderItemAttribute name will be whatever was returned from {@link #getAttributeName()}
*
* @return the allowed values for this ProductOption
*/
public List<ProductOptionValue> getAllowedValues();
/**
* Set the allowed values for this ProductOption
*
* @param allowedValues
*/
public void setAllowedValues(List<ProductOptionValue> allowedValues);
public Boolean getUseInSkuGeneration();
public ProductOptionValidationType getProductOptionValidationType();
public void setProductOptionValidationType(ProductOptionValidationType productOptionValidationType);
public void setUseInSkuGeneration(Boolean useInSkuGeneration);
void setErrorMessage(String errorMessage);
void setErrorCode(String errorCode);
String getErrorMessage();
String getValidationString();
void setValidationString(String validationString);
String getErrorCode();
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_catalog_domain_ProductOption.java |
1,692 | @Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_ADMIN_PASSWORD_TOKEN")
public class ForgotPasswordSecurityTokenImpl implements ForgotPasswordSecurityToken {
private static final long serialVersionUID = 1L;
@Id
@Column(name = "PASSWORD_TOKEN", nullable = false)
protected String token;
@Column(name = "CREATE_DATE", nullable = false)
@Temporal(TemporalType.TIMESTAMP)
protected Date createDate;
@Column(name = "TOKEN_USED_DATE")
@Temporal(TemporalType.TIMESTAMP)
protected Date tokenUsedDate;
@Column(name = "ADMIN_USER_ID", nullable = false)
protected Long adminUserId;
@Column(name = "TOKEN_USED_FLAG", nullable = false)
protected boolean tokenUsedFlag;
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
public Date getCreateDate() {
return createDate;
}
public void setCreateDate(Date createDate) {
this.createDate = createDate;
}
public Date getTokenUsedDate() {
return tokenUsedDate;
}
public void setTokenUsedDate(Date tokenUsedDate) {
this.tokenUsedDate = tokenUsedDate;
}
public Long getAdminUserId() {
return adminUserId;
}
public void setAdminUserId(Long adminUserId) {
this.adminUserId = adminUserId;
}
public boolean isTokenUsedFlag() {
return tokenUsedFlag;
}
public void setTokenUsedFlag(boolean tokenUsedFlag) {
this.tokenUsedFlag = tokenUsedFlag;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ForgotPasswordSecurityTokenImpl that = (ForgotPasswordSecurityTokenImpl) o;
if (token != null ? !token.equals(that.token) : that.token != null) return false;
return true;
}
@Override
public int hashCode() {
return token != null ? token.hashCode() : 0;
}
} | 1no label
| admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_server_security_domain_ForgotPasswordSecurityTokenImpl.java |
363 | public class HBaseIDAuthorityTest extends IDAuthorityTest {
public HBaseIDAuthorityTest(WriteConfiguration baseConfig) {
super(baseConfig);
}
@BeforeClass
public static void startHBase() throws IOException {
HBaseStorageSetup.startHBase();
}
@AfterClass
public static void stopHBase() {
// Workaround for https://issues.apache.org/jira/browse/HBASE-10312
if (VersionInfo.getVersion().startsWith("0.96"))
HBaseStorageSetup.killIfRunning();
}
public KeyColumnValueStoreManager openStorageManager() throws BackendException {
return new HBaseStoreManager(HBaseStorageSetup.getHBaseConfiguration());
}
} | 0true
| titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseIDAuthorityTest.java |
3,508 | class InternalFieldMapperListener extends FieldMapperListener {
@Override
public void fieldMapper(FieldMapper fieldMapper) {
addFieldMappers(Arrays.asList(fieldMapper));
}
@Override
public void fieldMappers(Iterable<FieldMapper> fieldMappers) {
addFieldMappers(fieldMappers);
}
} | 0true
| src_main_java_org_elasticsearch_index_mapper_MapperService.java |
55 | new Visitor() {
@Override
public void visit(Tree.StaticMemberOrTypeExpression that) {
Tree.TypeArguments tal = that.getTypeArguments();
Integer startIndex = tal==null ?
null : that.getTypeArguments().getStartIndex();
if (startIndex!=null && startIndex2!=null &&
startIndex.intValue()==startIndex2.intValue()) {
ProducedReference pr = that.getTarget();
Declaration d = that.getDeclaration();
if (d instanceof Functional && pr!=null) {
try {
String pref = document.get(that.getStartIndex(),
that.getStopIndex()-that.getStartIndex()+1);
addInvocationProposals(offset, pref, cpc, result, d,
pr, scope, null, typeArgText, false);
}
catch (BadLocationException e) {
e.printStackTrace();
}
}
}
super.visit(that);
}
public void visit(Tree.SimpleType that) {
Tree.TypeArgumentList tal = that.getTypeArgumentList();
Integer startIndex = tal==null ? null : tal.getStartIndex();
if (startIndex!=null && startIndex2!=null &&
startIndex.intValue()==startIndex2.intValue()) {
Declaration d = that.getDeclarationModel();
if (d instanceof Functional) {
try {
String pref = document.get(that.getStartIndex(),
that.getStopIndex()-that.getStartIndex()+1);
addInvocationProposals(offset, pref, cpc, result, d,
that.getTypeModel(), scope, null, typeArgText,
false);
}
catch (BadLocationException e) {
e.printStackTrace();
}
}
}
super.visit(that);
}
}.visit(cpc.getRootNode()); | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_TypeArgumentListCompletions.java |
653 | public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOperationAction<GetIndexTemplatesRequest, GetIndexTemplatesResponse> {
@Inject
public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
}
@Override
protected String transportAction() {
return GetIndexTemplatesAction.NAME;
}
@Override
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected GetIndexTemplatesRequest newRequest() {
return new GetIndexTemplatesRequest();
}
@Override
protected GetIndexTemplatesResponse newResponse() {
return new GetIndexTemplatesResponse();
}
@Override
protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) throws ElasticsearchException {
List<IndexTemplateMetaData> results;
// If we did not ask for a specific name, then we return all templates
if (request.names().length == 0) {
results = Lists.newArrayList(state.metaData().templates().values().toArray(IndexTemplateMetaData.class));
} else {
results = Lists.newArrayList();
}
for (String name : request.names()) {
if (Regex.isSimpleMatchPattern(name)) {
for (ObjectObjectCursor<String, IndexTemplateMetaData> entry : state.metaData().templates()) {
if (Regex.simpleMatch(name, entry.key)) {
results.add(entry.value);
}
}
} else if (state.metaData().templates().containsKey(name)) {
results.add(state.metaData().templates().get(name));
}
}
listener.onResponse(new GetIndexTemplatesResponse(results));
}
} | 1no label
| src_main_java_org_elasticsearch_action_admin_indices_template_get_TransportGetIndexTemplatesAction.java |
1,305 | public static final class RemoteDBRunner {
public static void main(String[] args) throws Exception {
OGlobalConfiguration.CACHE_LEVEL1_ENABLED.setValue(false);
OGlobalConfiguration.CACHE_LEVEL1_SIZE.setValue(0);
OGlobalConfiguration.CACHE_LEVEL2_ENABLED.setValue(false);
OGlobalConfiguration.CACHE_LEVEL2_SIZE.setValue(0);
OServer server = OServerMain.create();
server.startup(RemoteDBRunner.class
.getResourceAsStream("/com/orientechnologies/orient/core/storage/impl/local/paginated/db-update-config.xml"));
server.activate();
while (true)
;
}
} | 0true
| server_src_test_java_com_orientechnologies_orient_core_storage_impl_local_paginated_LocalPaginatedStorageUpdateCrashRestore.java |
696 | class LRUEntry {
OCacheEntry cacheEntry;
long hashCode;
LRUEntry next;
LRUEntry after;
LRUEntry before;
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
LRUEntry lruEntry = (LRUEntry) o;
if (!cacheEntry.equals(lruEntry.cacheEntry))
return false;
return true;
}
@Override
public int hashCode() {
return cacheEntry.hashCode();
}
@Override
public String toString() {
return "LRUEntry{" + "cacheEntry=" + cacheEntry + ", hashCode=" + hashCode + '}';
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_hashindex_local_cache_LRUEntry.java |
1,170 | public static enum ORDER {
/**
   * Used when the order relative to another operator cannot be evaluated or has no consequences.
*/
UNKNOWNED,
/**
* Used when this operator must be before the other one
*/
BEFORE,
/**
* Used when this operator must be after the other one
*/
AFTER,
/**
* Used when this operator is equal the other one
*/
EQUAL
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_sql_operator_OQueryOperator.java |
157 | public abstract class AbstractStructuredContentRuleProcessor implements StructuredContentRuleProcessor {
private static final Log LOG = LogFactory.getLog(AbstractStructuredContentRuleProcessor.class);
private Map expressionCache = Collections.synchronizedMap(new LRUMap(1000));
private ParserContext parserContext;
private Map<String, String> contextClassNames = new HashMap<String, String> ();
/**
* Having a parser context that imports the classes speeds MVEL by up to 60%.
* @return
*/
protected ParserContext getParserContext() {
if (parserContext == null) {
parserContext = new ParserContext();
parserContext.addImport("MVEL", MVEL.class);
parserContext.addImport("MvelHelper", MvelHelper.class);
/* Getting errors when the following is in place.
for (String key : contextClassNames.keySet()) {
String className = contextClassNames.get(key);
try {
Class c = Class.forName(className);
parserContext.addImport(key, c);
} catch (ClassNotFoundException e) {
LOG.error("Error resolving classname while setting up MVEL context, rule processing based on the key " + key + " will not be optimized", e);
}
} */
}
return parserContext;
}
/**
* Helpful method for processing a boolean MVEL expression and associated arguments.
*
* Caches the expression in an LRUCache.
* @param expression
* @param vars
* @return the result of the expression
*/
protected Boolean executeExpression(String expression, Map<String, Object> vars) {
Serializable exp = (Serializable) expressionCache.get(expression);
vars.put("MVEL", MVEL.class);
if (exp == null) {
try {
exp = MVEL.compileExpression(expression, getParserContext());
} catch (CompileException ce) {
LOG.warn("Compile exception processing phrase: " + expression,ce);
return Boolean.FALSE;
}
expressionCache.put(expression, exp);
}
try {
return (Boolean) MVEL.executeExpression(exp, vars);
} catch (Exception e) {
LOG.error(e);
}
return false;
}
/**
* List of class names to add to the MVEL ParserContext.
*
* @return
* @see {@link ParserContext}
*/
public Map<String, String> getContextClassNames() {
return contextClassNames;
}
/**
* List of class names to add to the MVEL ParserContext.
*
    * @param contextClassNames the map of context class names to register
* @see {@link ParserContext}
*/
public void setContextClassNames(Map<String, String> contextClassNames) {
this.contextClassNames = contextClassNames;
}
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_structure_service_AbstractStructuredContentRuleProcessor.java |
2,248 | return new TokenStream() {
boolean finished = true;
final CharTermAttribute term = addAttribute(CharTermAttribute.class);
final PayloadAttribute payload = addAttribute(PayloadAttribute.class);
@Override
public boolean incrementToken() throws IOException {
if (finished) {
return false;
}
term.setEmpty().append(uid);
payload.setPayload(new BytesRef(Numbers.longToBytes(version)));
finished = true;
return true;
}
@Override
public void reset() throws IOException {
finished = false;
}
}; | 0true
| src_test_java_org_elasticsearch_common_lucene_uid_VersionsTests.java |
1,773 | map.addEntryListener(new EntryAdapter<Object, Object>() {
@Override
public void entryEvicted(EntryEvent<Object, Object> event) {
count.incrementAndGet();
}
}, true); | 0true
| hazelcast_src_test_java_com_hazelcast_map_EvictionTest.java |
11 | static final class AsyncApply<T,U> extends Async {
final T arg;
final Fun<? super T,? extends U> fn;
final CompletableFuture<U> dst;
AsyncApply(T arg, Fun<? super T,? extends U> fn,
CompletableFuture<U> dst) {
this.arg = arg; this.fn = fn; this.dst = dst;
}
public final boolean exec() {
CompletableFuture<U> d; U u; Throwable ex;
if ((d = this.dst) != null && d.result == null) {
try {
u = fn.apply(arg);
ex = null;
} catch (Throwable rex) {
ex = rex;
u = null;
}
d.internalComplete(u, ex);
}
return true;
}
private static final long serialVersionUID = 5232453952276885070L;
} | 0true
| src_main_java_jsr166e_CompletableFuture.java |
4,876 | public class RestAllocationAction extends AbstractCatAction {
@Inject
public RestAllocationAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(GET, "/_cat/allocation", this);
controller.registerHandler(GET, "/_cat/allocation/{nodes}", this);
}
@Override
void documentation(StringBuilder sb) {
sb.append("/_cat/allocation\n");
}
@Override
public void doRequest(final RestRequest request, final RestChannel channel) {
final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes"));
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
clusterStateRequest.clear().routingTable(true);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
@Override
public void onResponse(final ClusterStateResponse state) {
NodesStatsRequest statsRequest = new NodesStatsRequest(nodes);
statsRequest.clear().fs(true);
client.admin().cluster().nodesStats(statsRequest, new ActionListener<NodesStatsResponse>() {
@Override
public void onResponse(NodesStatsResponse stats) {
try {
Table tab = buildTable(request, state, stats);
channel.sendResponse(RestTable.buildResponse(tab, request, channel));
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
@Override
Table getTableWithHeader(final RestRequest request) {
final Table table = new Table();
table.startHeaders();
table.addCell("shards", "alias:s;text-align:right;desc:number of shards on node");
table.addCell("disk.used", "alias:du,diskUsed;text-align:right;desc:disk used (total, not just ES)");
table.addCell("disk.avail", "alias:da,diskAvail;text-align:right;desc:disk available");
table.addCell("disk.total", "alias:dt,diskTotal;text-align:right;desc:total capacity of all volumes");
table.addCell("disk.percent", "alias:dp,diskPercent;text-align:right;desc:percent disk used");
table.addCell("host", "alias:h;desc:host of node");
table.addCell("ip", "desc:ip of node");
table.addCell("node", "alias:n;desc:name of node");
table.endHeaders();
return table;
}
private Table buildTable(RestRequest request, final ClusterStateResponse state, final NodesStatsResponse stats) {
final ObjectIntOpenHashMap<String> allocs = new ObjectIntOpenHashMap<String>();
for (ShardRouting shard : state.getState().routingTable().allShards()) {
String nodeId = "UNASSIGNED";
if (shard.assignedToNode()) {
nodeId = shard.currentNodeId();
}
allocs.addTo(nodeId, 1);
}
Table table = getTableWithHeader(request);
for (NodeStats nodeStats : stats.getNodes()) {
DiscoveryNode node = nodeStats.getNode();
int shardCount = 0;
if (allocs.containsKey(node.id())) {
shardCount = allocs.lget();
}
long used = nodeStats.getFs().getTotal().getTotal().bytes() - nodeStats.getFs().getTotal().getAvailable().bytes();
long avail = nodeStats.getFs().getTotal().getAvailable().bytes();
short diskPercent = -1;
if (used >= 0 && avail >= 0) {
diskPercent = (short) (used * 100 / (used + avail));
}
table.startRow();
table.addCell(shardCount);
table.addCell(used < 0 ? null : new ByteSizeValue(used));
table.addCell(avail < 0 ? null : new ByteSizeValue(avail));
table.addCell(nodeStats.getFs().getTotal().getTotal());
table.addCell(diskPercent < 0 ? null : diskPercent);
table.addCell(node == null ? null : node.getHostName());
table.addCell(node == null ? null : node.getHostAddress());
table.addCell(node == null ? "UNASSIGNED" : node.name());
table.endRow();
}
if (allocs.containsKey("UNASSIGNED")) {
table.startRow();
table.addCell(allocs.lget());
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell(null);
table.addCell("UNASSIGNED");
table.endRow();
}
return table;
}
} | 1no label
| src_main_java_org_elasticsearch_rest_action_cat_RestAllocationAction.java |
395 | private final Comparator<CacheRecord<K>> comparator = new Comparator<CacheRecord<K>>() {
public int compare(CacheRecord<K> o1, CacheRecord<K> o2) {
if (EvictionPolicy.LRU.equals(evictionPolicy)) {
return ((Long) o1.lastAccessTime).compareTo((o2.lastAccessTime));
} else if (EvictionPolicy.LFU.equals(evictionPolicy)) {
return ((Integer) o1.hit.get()).compareTo((o2.hit.get()));
}
return 0;
}
}; | 0true
| hazelcast-client_src_main_java_com_hazelcast_client_nearcache_ClientNearCache.java |
304 | public class OContextConfiguration {
  private Map<String, Object> config = new HashMap<String, Object>();
/**
   * Empty constructor to create just a proxy for the OGlobalConfiguration. No values are set.
*/
public OContextConfiguration() {
}
/**
* Initializes the context with custom parameters.
*
* @param iConfig
* Map of parameters of type Map<String, Object>.
*/
public OContextConfiguration(final Map<String, Object> iConfig) {
this.config = iConfig;
}
public OContextConfiguration(final OContextConfiguration iParent) {
if (iParent != null)
config.putAll(iParent.config);
}
public Object setValue(final OGlobalConfiguration iConfig, final Object iValue) {
return config.put(iConfig.getKey(), iValue);
}
public Object setValue(final String iName, final Object iValue) {
return config.put(iName, iValue);
}
public Object getValue(final OGlobalConfiguration iConfig) {
if (config != null && config.containsKey(iConfig.getKey()))
return config.get(iConfig.getKey());
return iConfig.getValue();
}
@SuppressWarnings("unchecked")
public <T> T getValue(final String iName, final T iDefaultValue) {
if (config != null && config.containsKey(iName))
return (T) config.get(iName);
final String sysProperty = System.getProperty(iName);
if (sysProperty != null)
return (T) sysProperty;
return iDefaultValue;
}
public boolean getValueAsBoolean(final OGlobalConfiguration iConfig) {
final Object v = getValue(iConfig);
return v instanceof Boolean ? ((Boolean) v).booleanValue() : Boolean.parseBoolean(v.toString());
}
public String getValueAsString(final String iName, final String iDefaultValue) {
return getValue(iName, iDefaultValue);
}
public String getValueAsString(final OGlobalConfiguration iConfig) {
final Object v = getValue(iConfig);
return v.toString();
}
public int getValueAsInteger(final OGlobalConfiguration iConfig) {
final Object v = getValue(iConfig);
return v instanceof Integer ? ((Integer) v).intValue() : Integer.parseInt(v.toString());
}
public long getValueAsLong(final OGlobalConfiguration iConfig) {
final Object v = getValue(iConfig);
    return v instanceof Long ? ((Long) v).longValue() : Long.parseLong(v.toString());
}
public float getValueAsFloat(final OGlobalConfiguration iConfig) {
final Object v = getValue(iConfig);
return v instanceof Float ? ((Float) v).floatValue() : Float.parseFloat(v.toString());
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_config_OContextConfiguration.java |
1,250 | public interface FulfillmentPricingProvider {
/**
* Calculates the total cost for this FulfillmentGroup. Specific configurations for calculating
* this cost can come from {@link FulfillmentGroup#getFulfillmentOption()}. This method is invoked
* during the pricing workflow and will only be called if {@link #canCalculateCostForFulfillmentGroup(FulfillmentGroup, FulfillmentOption)}
* returns true. This should call {@link FulfillmentGroup#setShippingPrice(org.broadleafcommerce.common.money.Money)} to
* set the shipping price on <b>fulfillmentGroup</b>
*
* @param fulfillmentGroup - the {@link FulfillmentGroup} to calculate costs for
* @return the modified {@link FulfillmentGroup} with correct pricing. This is typically <b>fulfillmentGroup</b> after it
* has been modified
*/
public FulfillmentGroup calculateCostForFulfillmentGroup(FulfillmentGroup fulfillmentGroup) throws FulfillmentPriceException;
/**
     * Whether or not this processor can provide a cost calculation for the given FulfillmentGroup and the given
* FulfillmentOption. This is not invoked directly by any workflow, but could instead be invoked via a controller
* that wants to display pricing to a user before the user actually picks a FulfillmentOption. The controller would
* inject an instance of FulfillmentPricingService and thus indirectly invoke this method for a particular option.
*
* @param fulfillmentGroup
* @param option - the candidate option a user might select based on the estimate
* @return <b>true</b> if this processor can estimate the costs, <b>false</b> otherwise
* @see {@link FulfillmentPricingService}, {@link FulfillmentOption}
*/
public boolean canCalculateCostForFulfillmentGroup(FulfillmentGroup fulfillmentGroup, FulfillmentOption option);
/**
     * Estimates the cost for fulfilling the given fulfillment group with the given options. The response should not include prices that the implementor of this interface
* cannot respond to. So, if the invoker of this method passes in several types of fulfillment options, the response should only contain prices for the fulfillment options
     * that would cause a call to
* {@link #canCalculateCostForFulfillmentGroup(org.broadleafcommerce.core.order.domain.FulfillmentGroup, org.broadleafcommerce.core.order.domain.FulfillmentOption)}
* to return true. This method may return null or it may return a non-null response with an empty map, indicating that no price estimate was available for the options given. This
* method SHOULD NOT throw an exception if it encounters a FulfillmentOption that it can not price. It should simply ignore that option.
*
* @param fulfillmentGroup - the group to estimate fulfillment costs for
* @param options - the candidate options that a user might select
* @return a DTO that represents pricing information that might be added to the fulfillment cost of <b>fulfillmentGroup</b> when
* {@link #calculateCostForFulfillmentGroup(FulfillmentGroup)} is invoked during the pricing workflow
* @see {@link FulfillmentPricingService}, {@link FulfillmentOption}
*/
public FulfillmentEstimationResponse estimateCostForFulfillmentGroup(FulfillmentGroup fulfillmentGroup, Set<FulfillmentOption> options) throws FulfillmentPriceException;
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_pricing_service_fulfillment_provider_FulfillmentPricingProvider.java |
391 | @SuppressWarnings({ "serial" })
public class ORecordLazyList extends ORecordTrackedList implements ORecordLazyMultiValue {
protected ORecordLazyListener listener;
protected final byte recordType;
protected ORecordMultiValueHelper.MULTIVALUE_CONTENT_TYPE contentType = MULTIVALUE_CONTENT_TYPE.EMPTY;
protected StringBuilder stream;
protected boolean autoConvertToRecord = true;
protected boolean marshalling = false;
protected boolean ridOnly = false;
public ORecordLazyList() {
super(null);
this.recordType = ODocument.RECORD_TYPE;
}
public ORecordLazyList(final ODocument iSourceRecord) {
super(iSourceRecord);
if (iSourceRecord != null) {
this.recordType = iSourceRecord.getRecordType();
if (!iSourceRecord.isLazyLoad())
// SET AS NON-LAZY LOAD THE COLLECTION TOO
autoConvertToRecord = false;
} else
this.recordType = ODocument.RECORD_TYPE;
}
public ORecordLazyList(final ODocument iSourceRecord, final Collection<? extends OIdentifiable> iOrigin) {
this(iSourceRecord);
if (iOrigin != null && !iOrigin.isEmpty())
addAll(iOrigin);
}
@SuppressWarnings("unchecked")
@Override
public boolean addAll(Collection<? extends OIdentifiable> c) {
final Iterator<OIdentifiable> it = (Iterator<OIdentifiable>) (c instanceof ORecordLazyMultiValue ? ((ORecordLazyMultiValue) c)
.rawIterator() : c.iterator());
while (it.hasNext())
add(it.next());
return true;
}
@Override
public boolean isEmpty() {
if (stream == null)
return super.isEmpty();
else
// AVOID TO LAZY LOAD IT, JUST CHECK IF STREAM IS EMPTY OR NULL
return stream.length() == 0;
}
/**
   * @return iterator that just returns the elements without conversion.
*/
public Iterator<OIdentifiable> rawIterator() {
lazyLoad(false);
final Iterator<OIdentifiable> subIterator = new OLazyIterator<OIdentifiable>() {
private int pos = -1;
public boolean hasNext() {
return pos < size() - 1;
}
public OIdentifiable next() {
return ORecordLazyList.this.rawGet(++pos);
}
public void remove() {
ORecordLazyList.this.remove(pos);
}
public OIdentifiable update(final OIdentifiable iValue) {
return ORecordLazyList.this.set(pos, iValue);
}
};
return new OLazyRecordIterator(sourceRecord, subIterator, false);
}
public OIdentifiable rawGet(final int index) {
lazyLoad(false);
return super.get(index);
}
@Override
public OLazyIterator<OIdentifiable> iterator() {
lazyLoad(false);
return new OLazyRecordIterator(sourceRecord, new OLazyIteratorListWrapper<OIdentifiable>(super.listIterator()),
autoConvertToRecord);
}
@Override
public ListIterator<OIdentifiable> listIterator() {
lazyLoad(false);
return super.listIterator();
}
@Override
public ListIterator<OIdentifiable> listIterator(int index) {
lazyLoad(false);
return super.listIterator(index);
}
@Override
public boolean contains(final Object o) {
if (OGlobalConfiguration.LAZYSET_WORK_ON_STREAM.getValueAsBoolean() && getStreamedContent() != null)
return getStreamedContent().indexOf(((OIdentifiable) o).getIdentity().toString()) > -1;
lazyLoad(false);
return super.contains(o);
}
@Override
public boolean add(OIdentifiable e) {
if (e != null)
if ((ridOnly || contentType == MULTIVALUE_CONTENT_TYPE.ALL_RIDS || OGlobalConfiguration.LAZYSET_WORK_ON_STREAM
.getValueAsBoolean()) && e.getIdentity().isPersistent() && (e instanceof ODocument && !((ODocument) e).isDirty()))
// IT'S BETTER TO LEAVE ALL RIDS AND EXTRACT ONLY THIS ONE
e = e.getIdentity();
else
contentType = ORecordMultiValueHelper.updateContentType(contentType, e);
lazyLoad(true);
return super.add(e);
}
@Override
public void add(int index, OIdentifiable e) {
if (e != null)
if ((ridOnly || contentType == MULTIVALUE_CONTENT_TYPE.ALL_RIDS || OGlobalConfiguration.LAZYSET_WORK_ON_STREAM
.getValueAsBoolean()) && e.getIdentity().isPersistent() && (e instanceof ODocument && !((ODocument) e).isDirty()))
// IT'S BETTER TO LEAVE ALL RIDS AND EXTRACT ONLY THIS ONE
e = e.getIdentity();
else
contentType = ORecordMultiValueHelper.updateContentType(contentType, e);
lazyLoad(true);
super.add(index, e);
}
@Override
public OIdentifiable set(int index, OIdentifiable e) {
lazyLoad(true);
if (e != null)
if ((ridOnly || contentType == MULTIVALUE_CONTENT_TYPE.ALL_RIDS || OGlobalConfiguration.LAZYSET_WORK_ON_STREAM
.getValueAsBoolean()) && e.getIdentity().isPersistent() && (e instanceof ODocument && !((ODocument) e).isDirty()))
// IT'S BETTER TO LEAVE ALL RIDS AND EXTRACT ONLY THIS ONE
e = e.getIdentity();
else
contentType = ORecordMultiValueHelper.updateContentType(contentType, e);
return super.set(index, e);
}
@Override
public OIdentifiable get(final int index) {
lazyLoad(false);
if (autoConvertToRecord)
convertLink2Record(index);
return super.get(index);
}
@Override
public int indexOf(final Object o) {
lazyLoad(false);
return super.indexOf(o);
}
@Override
public int lastIndexOf(final Object o) {
lazyLoad(false);
return super.lastIndexOf(o);
}
@Override
public OIdentifiable remove(final int iIndex) {
lazyLoad(true);
return super.remove(iIndex);
}
@Override
public boolean remove(final Object iElement) {
final boolean result;
if (OGlobalConfiguration.LAZYSET_WORK_ON_STREAM.getValueAsBoolean() && getStreamedContent() != null) {
// WORK ON STREAM
final StringBuilder stream = getStreamedContent();
final String rid = ((OIdentifiable) iElement).getIdentity().toString();
int pos = stream.indexOf(rid);
if (pos > -1) {
fireCollectionChangedEvent(new OMultiValueChangeEvent<Integer, OIdentifiable>(OMultiValueChangeEvent.OChangeType.REMOVE,
pos, null, (OIdentifiable) iElement));
// FOUND: REMOVE IT DIRECTLY FROM STREAM
if (pos > 0)
pos--;
stream.delete(pos, pos + rid.length() + 1);
if (stream.length() == 0)
setStreamedContent(null);
result = true;
} else
result = false;
} else {
lazyLoad(true);
result = super.remove(iElement);
}
if (isEmpty())
contentType = MULTIVALUE_CONTENT_TYPE.EMPTY;
return result;
}
@Override
public void clear() {
lazyLoad(true);
super.clear();
contentType = MULTIVALUE_CONTENT_TYPE.EMPTY;
stream = null;
}
@Override
public int size() {
lazyLoad(false);
return super.size();
}
@SuppressWarnings("unchecked")
@Override
public <RET> RET setDirty() {
if (!marshalling)
return (RET) super.setDirty();
return (RET) this;
}
@Override
protected void fireCollectionChangedEvent(final OMultiValueChangeEvent<Integer, OIdentifiable> event) {
if (!marshalling)
super.fireCollectionChangedEvent(event);
}
@Override
public Object[] toArray() {
convertLinks2Records();
return super.toArray();
}
@Override
public <T> T[] toArray(final T[] a) {
lazyLoad(false);
convertLinks2Records();
return super.toArray(a);
}
public void convertLinks2Records() {
lazyLoad(false);
if (contentType == MULTIVALUE_CONTENT_TYPE.ALL_RECORDS || !autoConvertToRecord)
// PRECONDITIONS
return;
for (int i = 0; i < size(); ++i) {
try {
convertLink2Record(i);
} catch (ORecordNotFoundException e) {
// LEAVE THE RID DIRTY
}
}
contentType = MULTIVALUE_CONTENT_TYPE.ALL_RECORDS;
}
public boolean convertRecords2Links() {
if (contentType == MULTIVALUE_CONTENT_TYPE.ALL_RIDS || sourceRecord == null)
// PRECONDITIONS
return true;
boolean allConverted = true;
for (int i = 0; i < super.size(); ++i) {
try {
if (!convertRecord2Link(i))
allConverted = false;
} catch (ORecordNotFoundException e) {
// LEAVE THE RID DIRTY
}
}
if (allConverted)
contentType = MULTIVALUE_CONTENT_TYPE.ALL_RIDS;
return allConverted;
}
/**
* Convert the item requested from link to record.
*
* @param iIndex
* Position of the item to convert
*/
private void convertLink2Record(final int iIndex) {
if (ridOnly || !autoConvertToRecord)
// PRECONDITIONS
return;
final OIdentifiable o = super.get(iIndex);
if (contentType == MULTIVALUE_CONTENT_TYPE.ALL_RECORDS && !o.getIdentity().isNew())
// ALL RECORDS AND THE OBJECT IS NOT NEW, DO NOTHING
return;
if (o != null && o instanceof ORecordId) {
final ORecordId rid = (ORecordId) o;
marshalling = true;
try {
super.set(iIndex, rid.getRecord());
} catch (ORecordNotFoundException e) {
// IGNORE THIS
} finally {
marshalling = false;
}
}
}
/**
* Convert the item requested from record to link.
*
* @param iIndex
* Position of the item to convert
* @return <code>true</code> if conversion was successful.
*/
private boolean convertRecord2Link(final int iIndex) {
if (contentType == MULTIVALUE_CONTENT_TYPE.ALL_RIDS)
// PRECONDITIONS
return true;
final Object o = super.get(iIndex);
if (o != null && o instanceof OIdentifiable && ((OIdentifiable) o).getIdentity().isPersistent()) {
if (o instanceof ORecord<?> && !((ORecord<?>) o).isDirty()) {
marshalling = true;
try {
super.set(iIndex, ((ORecord<?>) o).getIdentity());
// CONVERTED
return true;
} catch (ORecordNotFoundException e) {
// IGNORE THIS
} finally {
marshalling = false;
}
} else if (o instanceof ORID)
// ALREADY CONVERTED
return true;
}
return false;
}
public boolean isAutoConvertToRecord() {
return autoConvertToRecord;
}
public void setAutoConvertToRecord(boolean convertToDocument) {
this.autoConvertToRecord = convertToDocument;
}
@Override
public String toString() {
if (stream == null)
return ORecordMultiValueHelper.toString(this);
else {
return "[NOT LOADED: " + stream + ']';
}
}
public byte getRecordType() {
return recordType;
}
public ORecordLazyList copy(final ODocument iSourceRecord) {
final ORecordLazyList copy = new ORecordLazyList(iSourceRecord);
copy.contentType = contentType;
copy.stream = stream;
copy.autoConvertToRecord = autoConvertToRecord;
final int tot = super.size();
for (int i = 0; i < tot; ++i)
copy.add(rawGet(i));
return copy;
}
public Iterator<OIdentifiable> newItemsIterator() {
return null;
}
public ORecordLazyList setStreamedContent(final StringBuilder iStream) {
if (iStream == null || iStream.length() == 0)
stream = null;
else {
// CREATE A COPY TO FREE ORIGINAL BUFFER
stream = iStream;
final int prevModCount = modCount;
reset();
modCount = prevModCount;
}
contentType = MULTIVALUE_CONTENT_TYPE.ALL_RIDS;
return this;
}
public StringBuilder getStreamedContent() {
return stream;
}
public ORecordLazyListener getListener() {
return listener;
}
public ORecordLazyList setListener(final ORecordLazyListener listener) {
this.listener = listener;
return this;
}
public boolean lazyLoad(final boolean iInvalidateStream) {
if (stream == null)
return false;
marshalling = true;
int currentModCount = modCount;
final List<String> items = OStringSerializerHelper.smartSplit(stream.toString(), OStringSerializerHelper.RECORD_SEPARATOR);
for (String item : items) {
if (item.length() == 0)
continue;
super.add(new ORecordId(item));
}
modCount = currentModCount;
marshalling = false;
// if (iInvalidateStream)
stream = null;
contentType = MULTIVALUE_CONTENT_TYPE.ALL_RIDS;
if (listener != null)
listener.onLazyLoad();
return true;
}
public boolean isRidOnly() {
return ridOnly;
}
public ORecordLazyList setRidOnly(boolean ridOnly) {
this.ridOnly = ridOnly;
return this;
}
public boolean detach() {
return convertRecords2Links();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_db_record_ORecordLazyList.java |
1,101 | public final class SSLConfig {
private boolean enabled = false;
private String factoryClassName = null;
private Object factoryImplementation = null;
private Properties properties = new Properties();
/**
* Returns the name of the {@link com.hazelcast.nio.ssl.SSLContextFactory} implementation class
*
* @return the name of the class
*/
public String getFactoryClassName() {
return factoryClassName;
}
/**
* Sets the name for the {@link com.hazelcast.nio.ssl.SSLContextFactory} implementation class
*
* @param factoryClassName the name of the {@link com.hazelcast.nio.ssl.SSLContextFactory} implementation class to set
*/
public SSLConfig setFactoryClassName(String factoryClassName) {
this.factoryClassName = factoryClassName;
return this;
}
/**
* Returns if this configuration is enabled
*
* @return true if enabled, false otherwise
*/
public boolean isEnabled() {
return enabled;
}
/**
     * Enables or disables this configuration.
*
* @param enabled
*/
public SSLConfig setEnabled(boolean enabled) {
this.enabled = enabled;
return this;
}
/**
* Sets the {@link com.hazelcast.nio.ssl.SSLContextFactory} implementation object
*
* @param factoryImplementation factory implementation object
* @return this SSLConfig instance
*/
public SSLConfig setFactoryImplementation(Object factoryImplementation) {
this.factoryImplementation = factoryImplementation;
return this;
}
/**
* Returns the {@link com.hazelcast.nio.ssl.SSLContextFactory} implementation object
*
* @return {@link com.hazelcast.nio.ssl.SSLContextFactory} implementation object
*/
public Object getFactoryImplementation() {
return factoryImplementation;
}
/**
* Sets a property.
*
* @param name the name of the property to set.
* @param value the value of the property to set
* @return the updated SSLConfig
* @throws NullPointerException if name or value is null.
*/
public SSLConfig setProperty(String name, String value) {
properties.put(name, value);
return this;
}
/**
* Gets a property.
*
* @param name the name of the property to get.
* @return the value of the property, null if not found
* @throws NullPointerException if name is null.
*/
public String getProperty(String name) {
return properties.getProperty(name);
}
/**
* Gets all properties.
*
* @return the properties.
*/
public Properties getProperties() {
return properties;
}
/**
* Sets the properties.
*
* @param properties the properties to set.
* @return the updated SSLConfig.
* @throws IllegalArgumentException if properties is null.
*/
public SSLConfig setProperties(Properties properties) {
if(properties == null){
throw new IllegalArgumentException("properties can't be null");
}
this.properties = properties;
return this;
}
@Override
public String toString() {
return "SSLConfig{" +
"className='" + factoryClassName + '\'' +
", enabled=" + enabled +
", implementation=" + factoryImplementation +
", properties=" + properties +
'}';
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_config_SSLConfig.java |
1,986 | public class ToStringBuilder {
// Linked hash map ensures ordering.
final Map<String, Object> map = new LinkedHashMap<String, Object>();
final String name;
public ToStringBuilder(String name) {
this.name = name;
}
public ToStringBuilder(Class type) {
this.name = type.getSimpleName();
}
public ToStringBuilder add(String name, Object value) {
if (map.put(name, value) != null) {
throw new RuntimeException("Duplicate names: " + name);
}
return this;
}
@Override
public String toString() {
return name + map.toString().replace('{', '[').replace('}', ']');
}
} | 0true
| src_main_java_org_elasticsearch_common_inject_internal_ToStringBuilder.java |
604 | public final class IndexMetadata {
private final String name;
private final OIndexDefinition indexDefinition;
private final Set<String> clustersToIndex;
private final String type;
private final String algorithm;
private final String valueContainerAlgorithm;
public IndexMetadata(String name, OIndexDefinition indexDefinition, Set<String> clustersToIndex, String type, String algorithm,
String valueContainerAlgorithm) {
this.name = name;
this.indexDefinition = indexDefinition;
this.clustersToIndex = clustersToIndex;
this.type = type;
this.algorithm = algorithm;
this.valueContainerAlgorithm = valueContainerAlgorithm;
}
public String getName() {
return name;
}
public OIndexDefinition getIndexDefinition() {
return indexDefinition;
}
public Set<String> getClustersToIndex() {
return clustersToIndex;
}
public String getType() {
return type;
}
public String getAlgorithm() {
return algorithm;
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
IndexMetadata that = (IndexMetadata) o;
if (algorithm != null ? !algorithm.equals(that.algorithm) : that.algorithm != null)
return false;
if (!clustersToIndex.equals(that.clustersToIndex))
return false;
if (indexDefinition != null ? !indexDefinition.equals(that.indexDefinition) : that.indexDefinition != null)
return false;
if (!name.equals(that.name))
return false;
if (!type.equals(that.type))
return false;
return true;
}
@Override
public int hashCode() {
int result = name.hashCode();
result = 31 * result + (indexDefinition != null ? indexDefinition.hashCode() : 0);
result = 31 * result + clustersToIndex.hashCode();
result = 31 * result + type.hashCode();
result = 31 * result + (algorithm != null ? algorithm.hashCode() : 0);
return result;
}
public String getValueContainerAlgorithm() {
return valueContainerAlgorithm;
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_index_OIndexInternal.java |
1,221 | public class PaymentSeed implements CompositePaymentResponse {
private Order order;
private Map<PaymentInfo, Referenced> infos;
private PaymentResponse paymentResponse;
private Money transactionAmount;
public PaymentSeed(Order order, Map<PaymentInfo, Referenced> infos, PaymentResponse paymentResponse) {
this.order = order;
this.infos = infos;
this.paymentResponse = paymentResponse;
}
public PaymentSeed(Order order, Map<PaymentInfo, Referenced> infos, PaymentResponse paymentResponse, Money transactionAmount) {
this.infos = infos;
this.order = order;
this.paymentResponse = paymentResponse;
this.transactionAmount = transactionAmount;
}
public Order getOrder() {
return order;
}
public Map<PaymentInfo, Referenced> getInfos() {
return infos;
}
public PaymentResponse getPaymentResponse() {
return paymentResponse;
}
public Money getTransactionAmount() {
return transactionAmount;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_workflow_PaymentSeed.java |
554 | public class ClientTxnQueueProxy<E> extends ClientTxnProxy implements TransactionalQueue<E> {
public ClientTxnQueueProxy(String name, TransactionContextProxy proxy) {
super(name, proxy);
}
public boolean offer(E e) {
try {
return offer(e, 0, TimeUnit.MILLISECONDS);
} catch (InterruptedException e1) {
return false;
}
}
public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException {
final Data data = toData(e);
TxnOfferRequest request = new TxnOfferRequest(getName(), unit.toMillis(timeout), data);
Boolean result = invoke(request);
return result;
}
public E poll() {
try {
return poll(0, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
return null;
}
}
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
TxnPollRequest request = new TxnPollRequest(getName(), unit.toMillis(timeout));
return invoke(request);
}
@Override
public E peek() {
try {
return peek(0, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
return null;
}
}
@Override
public E peek(long timeout, TimeUnit unit) throws InterruptedException {
TxnPeekRequest request = new TxnPeekRequest(getName(), unit.toMillis(timeout));
return invoke(request);
}
public int size() {
TxnSizeRequest request = new TxnSizeRequest(getName());
Integer result = invoke(request);
return result;
}
public String getName() {
return (String) getId();
}
@Override
public String getServiceName() {
return QueueService.SERVICE_NAME;
}
void onDestroy() {
}
} | 0true
| hazelcast-client_src_main_java_com_hazelcast_client_txn_proxy_ClientTxnQueueProxy.java |
248 | service.submit(runnable, new ExecutionCallback() {
public void onResponse(Object response) {
result.set(response);
responseLatch.countDown();
}
public void onFailure(Throwable t) {
}
}); | 0true
| hazelcast-client_src_test_java_com_hazelcast_client_executor_ClientExecutorServiceSubmitTest.java |
804 | public interface FulfillmentGroupAdjustment extends Adjustment {
public FulfillmentGroup getFulfillmentGroup();
public void init(FulfillmentGroup fulfillmentGroup, Offer offer, String reason);
public void setValue(Money value);
public void setFulfillmentGroup(FulfillmentGroup fulfillmentGroup);
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_offer_domain_FulfillmentGroupAdjustment.java |
812 | @SuppressWarnings("unchecked")
public class OSchemaProxy extends OProxedResource<OSchemaShared> implements OSchema {
public OSchemaProxy(final OSchemaShared iDelegate, final ODatabaseRecord iDatabase) {
super(iDelegate, iDatabase);
}
public void create() {
setCurrentDatabaseInThreadLocal();
delegate.create();
}
public int countClasses() {
setCurrentDatabaseInThreadLocal();
return delegate.countClasses();
}
public OClass createClass(final Class<?> iClass) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClass);
}
public OClass createClass(final Class<?> iClass, final int iDefaultClusterId) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClass, iDefaultClusterId);
}
public OClass createClass(final String iClassName) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClassName);
}
public OClass getOrCreateClass(final String iClassName) {
setCurrentDatabaseInThreadLocal();
return delegate.getOrCreateClass(iClassName);
}
public OClass getOrCreateClass(final String iClassName, final OClass iSuperClass) {
setCurrentDatabaseInThreadLocal();
return delegate.getOrCreateClass(iClassName, iSuperClass);
}
public OClass createClass(final String iClassName, final OClass iSuperClass) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClassName, iSuperClass);
}
public OClass createClass(final String iClassName, final OClass iSuperClass, final CLUSTER_TYPE iType) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClassName, iSuperClass, iType);
}
public OClass createClass(final String iClassName, final int iDefaultClusterId) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClassName, iDefaultClusterId);
}
public OClass createClass(final String iClassName, final OClass iSuperClass, final int iDefaultClusterId) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClassName, iSuperClass, iDefaultClusterId);
}
public OClass createClass(final String iClassName, final OClass iSuperClass, final int[] iClusterIds) {
setCurrentDatabaseInThreadLocal();
return delegate.createClass(iClassName, iSuperClass, iClusterIds);
}
@Override
public OClass createAbstractClass(Class<?> iClass) {
setCurrentDatabaseInThreadLocal();
return delegate.createAbstractClass(iClass);
}
@Override
public OClass createAbstractClass(String iClassName) {
setCurrentDatabaseInThreadLocal();
return delegate.createAbstractClass(iClassName);
}
@Override
public OClass createAbstractClass(String iClassName, OClass iSuperClass) {
setCurrentDatabaseInThreadLocal();
return delegate.createAbstractClass(iClassName, iSuperClass);
}
public OClass createClassInternal(final String iClassName, final OClass iSuperClass, final int[] iClusterIds) {
setCurrentDatabaseInThreadLocal();
return delegate.createClassInternal(iClassName, iSuperClass, iClusterIds);
}
public void dropClass(final String iClassName) {
setCurrentDatabaseInThreadLocal();
delegate.dropClass(iClassName);
}
public void dropClassInternal(final String iClassName) {
setCurrentDatabaseInThreadLocal();
delegate.dropClassInternal(iClassName);
}
public boolean existsClass(final String iClassName) {
setCurrentDatabaseInThreadLocal();
return delegate.existsClass(iClassName);
}
public OClass getClass(final Class<?> iClass) {
setCurrentDatabaseInThreadLocal();
return delegate.getClass(iClass);
}
public OClass getClass(final String iClassName) {
setCurrentDatabaseInThreadLocal();
return delegate.getClass(iClassName);
}
public Collection<OClass> getClasses() {
setCurrentDatabaseInThreadLocal();
return delegate.getClasses();
}
public void load() {
setCurrentDatabaseInThreadLocal();
delegate.load();
}
public <RET extends ODocumentWrapper> RET reload() {
setCurrentDatabaseInThreadLocal();
return (RET) delegate.reload();
}
public <RET extends ODocumentWrapper> RET save() {
setCurrentDatabaseInThreadLocal();
return (RET) delegate.save();
}
public int getVersion() {
setCurrentDatabaseInThreadLocal();
return delegate.getVersion();
}
public void saveInternal() {
setCurrentDatabaseInThreadLocal();
delegate.saveInternal();
}
public ORID getIdentity() {
setCurrentDatabaseInThreadLocal();
return delegate.getIdentity();
}
public void close() {
}
public String toString() {
return delegate.toString();
}
@Override
public Set<OClass> getClassesRelyOnCluster(final String iClusterName) {
return delegate.getClassesRelyOnCluster(iClusterName);
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_metadata_schema_OSchemaProxy.java |
2,830 | public class CharFilterTests extends ElasticsearchTokenStreamTestCase {
@Test
public void testMappingCharFilter() throws Exception {
Index index = new Index("test");
Settings settings = settingsBuilder()
.put("index.analysis.char_filter.my_mapping.type", "mapping")
.putArray("index.analysis.char_filter.my_mapping.mappings", "ph=>f", "qu=>q")
.put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
.putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
.build();
Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
Injector injector = new ModulesBuilder().add(
new IndexSettingsModule(index, settings),
new IndexNameModule(index),
new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
.createChildInjector(parentInjector);
AnalysisService analysisService = injector.getInstance(AnalysisService.class);
NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
// Repeat one more time to make sure that char filter is reinitialized correctly
assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
}
@Test
public void testHtmlStripCharFilter() throws Exception {
Index index = new Index("test");
Settings settings = settingsBuilder()
.put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
.putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "html_strip")
.build();
Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
Injector injector = new ModulesBuilder().add(
new IndexSettingsModule(index, settings),
new IndexNameModule(index),
new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
.createChildInjector(parentInjector);
AnalysisService analysisService = injector.getInstance(AnalysisService.class);
NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
// Repeat one more time to make sure that char filter is reinitialized correctly
assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
}
} | 0true
| src_test_java_org_elasticsearch_index_analysis_CharFilterTests.java |
28 | {
volatile private StateTransitionLogger logger = null;
@Override
public void listeningAt( URI me )
{
server.listeningAt( me );
if (logger == null)
{
logger = new StateTransitionLogger( logging );
server.addStateTransitionListener( logger );
}
}
@Override
public void channelOpened( URI to )
{
}
@Override
public void channelClosed( URI to )
{
}
} ); | 1no label
| enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java |
1,606 | Collections.sort(lockInfos, new Comparator<LockInfo>() {
public int compare(LockInfo o1, LockInfo o2) {
int comp1 = Integer.valueOf(o2.getWaitingThreadCount()).compareTo(o1.getWaitingThreadCount());
if (comp1 == 0)
return Long.valueOf(o1.getAcquireTime()).compareTo(o2.getAcquireTime());
else return comp1;
}
}); | 0true
| hazelcast_src_main_java_com_hazelcast_management_ClusterRuntimeState.java |
1,127 | @Beta
public interface AsyncAtomicReference<E> extends IAtomicReference<E> {
ICompletableFuture<Boolean> asyncCompareAndSet(E expect, E update);
ICompletableFuture<E> asyncGet();
ICompletableFuture<Void> asyncSet(E newValue);
ICompletableFuture<E> asyncGetAndSet(E newValue);
ICompletableFuture<E> asyncSetAndGet(E update);
ICompletableFuture<Boolean> asyncIsNull();
ICompletableFuture<Void> asyncClear();
ICompletableFuture<Boolean> asyncContains(E value);
ICompletableFuture<Void> asyncAlter(IFunction<E, E> function);
ICompletableFuture<E> asyncAlterAndGet(IFunction<E, E> function);
ICompletableFuture<E> asyncGetAndAlter(IFunction<E, E> function);
<R> ICompletableFuture<R> asyncApply(IFunction<E, R> function);
} | 0true
| hazelcast_src_main_java_com_hazelcast_core_AsyncAtomicReference.java |
900 | threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
performFirstPhase(fShardIndex, shardIt);
}
}); | 1no label
| src_main_java_org_elasticsearch_action_search_type_TransportSearchTypeAction.java |
2,904 | public class NumericLongAnalyzer extends NumericAnalyzer<NumericLongTokenizer> {
private final static IntObjectOpenHashMap<NamedAnalyzer> builtIn;
static {
builtIn = new IntObjectOpenHashMap<NamedAnalyzer>();
builtIn.put(Integer.MAX_VALUE, new NamedAnalyzer("_long/max", AnalyzerScope.GLOBAL, new NumericLongAnalyzer(Integer.MAX_VALUE)));
for (int i = 0; i <= 64; i += 4) {
builtIn.put(i, new NamedAnalyzer("_long/" + i, AnalyzerScope.GLOBAL, new NumericLongAnalyzer(i)));
}
}
public static NamedAnalyzer buildNamedAnalyzer(int precisionStep) {
NamedAnalyzer namedAnalyzer = builtIn.get(precisionStep);
if (namedAnalyzer == null) {
namedAnalyzer = new NamedAnalyzer("_long/" + precisionStep, AnalyzerScope.INDEX, new NumericLongAnalyzer(precisionStep));
}
return namedAnalyzer;
}
private final int precisionStep;
public NumericLongAnalyzer() {
this(NumericUtils.PRECISION_STEP_DEFAULT);
}
public NumericLongAnalyzer(int precisionStep) {
this.precisionStep = precisionStep;
}
@Override
protected NumericLongTokenizer createNumericTokenizer(Reader reader, char[] buffer) throws IOException {
return new NumericLongTokenizer(reader, precisionStep, buffer);
}
} | 0true
| src_main_java_org_elasticsearch_index_analysis_NumericLongAnalyzer.java |
973 | public class OLinkSerializer implements OBinarySerializer<OIdentifiable> {
private static final int CLUSTER_POS_SIZE = OClusterPositionFactory.INSTANCE.getSerializedSize();
public static OLinkSerializer INSTANCE = new OLinkSerializer();
public static final byte ID = 9;
public static final int RID_SIZE = OShortSerializer.SHORT_SIZE + CLUSTER_POS_SIZE;
public int getObjectSize(final OIdentifiable rid, Object... hints) {
return RID_SIZE;
}
public void serialize(final OIdentifiable rid, final byte[] stream, final int startPosition, Object... hints) {
ORID r = rid.getIdentity();
short2bytes((short) r.getClusterId(), stream, startPosition);
System.arraycopy(r.getClusterPosition().toStream(), 0, stream, startPosition + OShortSerializer.SHORT_SIZE, CLUSTER_POS_SIZE);
}
public ORecordId deserialize(final byte[] stream, final int startPosition) {
return new ORecordId(bytes2short(stream, startPosition), OClusterPositionFactory.INSTANCE.fromStream(stream, startPosition
+ OShortSerializer.SHORT_SIZE));
}
public int getObjectSize(final byte[] stream, final int startPosition) {
return RID_SIZE;
}
public byte getId() {
return ID;
}
public int getObjectSizeNative(byte[] stream, int startPosition) {
return RID_SIZE;
}
public void serializeNative(OIdentifiable rid, byte[] stream, int startPosition, Object... hints) {
ORID r = rid.getIdentity();
OShortSerializer.INSTANCE.serializeNative((short) r.getClusterId(), stream, startPosition);
System.arraycopy(r.getClusterPosition().toStream(), 0, stream, startPosition + OShortSerializer.SHORT_SIZE, CLUSTER_POS_SIZE);
}
public ORecordId deserializeNative(byte[] stream, int startPosition) {
int clusterId = OShortSerializer.INSTANCE.deserializeNative(stream, startPosition);
OClusterPosition clusterPosition = OClusterPositionFactory.INSTANCE.fromStream(stream, startPosition
+ OShortSerializer.SHORT_SIZE);
return new ORecordId(clusterId, clusterPosition);
}
@Override
public void serializeInDirectMemory(OIdentifiable rid, ODirectMemoryPointer pointer, long offset, Object... hints) {
ORID r = rid.getIdentity();
OShortSerializer.INSTANCE.serializeInDirectMemory((short) r.getClusterId(), pointer, offset);
pointer.set(offset + OShortSerializer.SHORT_SIZE, r.getClusterPosition().toStream(), 0, CLUSTER_POS_SIZE);
}
@Override
public OIdentifiable deserializeFromDirectMemory(ODirectMemoryPointer pointer, long offset) {
int clusterId = OShortSerializer.INSTANCE.deserializeFromDirectMemory(pointer, offset);
OClusterPosition clusterPosition = OClusterPositionFactory.INSTANCE.fromStream(pointer.get(
offset + OShortSerializer.SHORT_SIZE, CLUSTER_POS_SIZE));
return new ORecordId(clusterId, clusterPosition);
}
@Override
public int getObjectSizeInDirectMemory(ODirectMemoryPointer pointer, long offset) {
return RID_SIZE;
}
public boolean isFixedLength() {
return true;
}
public int getFixedLength() {
return RID_SIZE;
}
@Override
public OIdentifiable preprocess(OIdentifiable value, Object... hints) {
if (value == null)
return null;
else
return value.getIdentity();
}
} | 0true
| core_src_main_java_com_orientechnologies_orient_core_serialization_serializer_binary_impl_OLinkSerializer.java |
1,271 | public static class Customer implements Serializable {
private int year;
private String name;
private byte[] field = new byte[100];
public Customer(int i, String s) {
this.year = i;
this.name = s;
}
} | 0true
| hazelcast_src_main_java_com_hazelcast_examples_AllTest.java |
3,708 | public static class Defaults {
public static final String NAME = VersionFieldMapper.NAME;
public static final float BOOST = 1.0f;
public static final FieldType FIELD_TYPE = NumericDocValuesField.TYPE;
} | 0true
| src_main_java_org_elasticsearch_index_mapper_internal_VersionFieldMapper.java |
1,883 | node.executeTransaction(options, new TransactionalTask<Boolean>() {
public Boolean execute(TransactionalTaskContext context) throws TransactionException {
final TransactionalMap<Object, Object> txMap = context.getMap(mapName);
PagingPredicate predicate = new PagingPredicate(5);
txMap.values(predicate);
return true;
}
}); | 0true
| hazelcast_src_test_java_com_hazelcast_map_MapTransactionTest.java |
187 | public abstract class RecursiveAction extends ForkJoinTask<Void> {
private static final long serialVersionUID = 5232453952276485070L;
/**
* The main computation performed by this task.
*/
protected abstract void compute();
/**
* Always returns {@code null}.
*
* @return {@code null} always
*/
public final Void getRawResult() { return null; }
/**
* Requires null completion value.
*/
protected final void setRawResult(Void mustBeNull) { }
/**
* Implements execution conventions for RecursiveActions.
*/
protected final boolean exec() {
compute();
return true;
}
} | 0true
| src_main_java_jsr166y_RecursiveAction.java |
3,537 | public class CompletionFieldMapperTests extends ElasticsearchTestCase {
@Test
public void testDefaultConfiguration() throws IOException {
String mapping = jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("completion")
.field("type", "completion")
.endObject().endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
assertThat(completionFieldMapper.isStoringPayloads(), is(false));
}
@Test
public void testThatSerializationIncludesAllElements() throws Exception {
String mapping = jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("completion")
.field("type", "completion")
.field("index_analyzer", "simple")
.field("search_analyzer", "standard")
.field("payloads", true)
.field("preserve_separators", false)
.field("preserve_position_increments", true)
.field("max_input_length", 14)
.endObject().endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
XContentBuilder builder = jsonBuilder().startObject();
completionFieldMapper.toXContent(builder, null).endObject();
builder.close();
Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
assertThat(configMap.get("index_analyzer").toString(), is("simple"));
assertThat(configMap.get("search_analyzer").toString(), is("standard"));
assertThat(Boolean.valueOf(configMap.get("payloads").toString()), is(true));
assertThat(Boolean.valueOf(configMap.get("preserve_separators").toString()), is(false));
assertThat(Boolean.valueOf(configMap.get("preserve_position_increments").toString()), is(true));
assertThat(Integer.valueOf(configMap.get("max_input_length").toString()), is(14));
}
@Test
public void testThatSerializationCombinesToOneAnalyzerFieldIfBothAreEqual() throws Exception {
String mapping = jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("completion")
.field("type", "completion")
.field("index_analyzer", "simple")
.field("search_analyzer", "simple")
.endObject().endObject()
.endObject().endObject().string();
DocumentMapper defaultMapper = MapperTestUtils.newParser().parse(mapping);
FieldMapper fieldMapper = defaultMapper.mappers().name("completion").mapper();
assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
XContentBuilder builder = jsonBuilder().startObject();
completionFieldMapper.toXContent(builder, null).endObject();
builder.close();
Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
assertThat(configMap.get("analyzer").toString(), is("simple"));
}
} | 0true
| src_test_java_org_elasticsearch_index_mapper_completion_CompletionFieldMapperTests.java |
1,652 | public class Explicit<T> {
private final T value;
private final boolean explicit;
/**
* Create a value with an indication if this was an explicit choice
* @param value a setting value
* @param explicit true if the value passed is a conscious decision, false if using some kind of default
*/
public Explicit(T value, boolean explicit) {
this.value = value;
this.explicit = explicit;
}
public T value() {
return this.value;
}
/**
*
* @return true if the value passed is a conscious decision, false if using some kind of default
*/
public boolean explicit() {
return this.explicit;
}
} | 0true
| src_main_java_org_elasticsearch_common_Explicit.java |
876 | threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
QuerySearchResult queryResult = firstResults.get(entry.index);
DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
if (node.id().equals(nodes.localNodeId())) {
FetchSearchRequest fetchSearchRequest = new FetchSearchRequest(request, queryResult.id(), entry.value);
executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
}
}
}
}); | 0true
| src_main_java_org_elasticsearch_action_search_type_TransportSearchQueryThenFetchAction.java |
2,467 | static class TieBreakingPrioritizedRunnable extends PrioritizedRunnable {
final Runnable runnable;
final long insertionOrder;
TieBreakingPrioritizedRunnable(PrioritizedRunnable runnable, long insertionOrder) {
this(runnable, runnable.priority(), insertionOrder);
}
TieBreakingPrioritizedRunnable(Runnable runnable, Priority priority, long insertionOrder) {
super(priority);
this.runnable = runnable;
this.insertionOrder = insertionOrder;
}
@Override
public void run() {
runnable.run();
}
@Override
public int compareTo(PrioritizedRunnable pr) {
int res = super.compareTo(pr);
if (res != 0 || !(pr instanceof TieBreakingPrioritizedRunnable)) {
return res;
}
return insertionOrder < ((TieBreakingPrioritizedRunnable) pr).insertionOrder ? -1 : 1;
}
} | 0true
| src_main_java_org_elasticsearch_common_util_concurrent_PrioritizedEsThreadPoolExecutor.java |
497 | public interface Catalog extends Serializable {
Long getId();
void setId(Long id);
String getName();
void setName(String name);
List<Site> getSites();
void setSites(List<Site> sites);
} | 0true
| common_src_main_java_org_broadleafcommerce_common_site_domain_Catalog.java |
3,399 | public abstract class FieldsVisitor extends StoredFieldVisitor {
protected BytesReference source;
protected Uid uid;
protected Map<String, List<Object>> fieldsValues;
public void postProcess(MapperService mapperService) {
if (uid != null) {
DocumentMapper documentMapper = mapperService.documentMapper(uid.type());
if (documentMapper != null) {
// we can derive the exact type for the mapping
postProcess(documentMapper);
return;
}
}
// can't derive exact mapping type
for (Map.Entry<String, List<Object>> entry : fields().entrySet()) {
FieldMappers fieldMappers = mapperService.indexName(entry.getKey());
if (fieldMappers == null) {
continue;
}
List<Object> fieldValues = entry.getValue();
for (int i = 0; i < fieldValues.size(); i++) {
fieldValues.set(i, fieldMappers.mapper().valueForSearch(fieldValues.get(i)));
}
}
}
public void postProcess(DocumentMapper documentMapper) {
for (Map.Entry<String, List<Object>> entry : fields().entrySet()) {
FieldMapper<?> fieldMapper = documentMapper.mappers().indexName(entry.getKey()).mapper();
if (fieldMapper == null) {
continue;
}
List<Object> fieldValues = entry.getValue();
for (int i = 0; i < fieldValues.size(); i++) {
fieldValues.set(i, fieldMapper.valueForSearch(fieldValues.get(i)));
}
}
}
@Override
public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
source = new BytesArray(value);
} else {
addValue(fieldInfo.name, new BytesRef(value));
}
}
@Override
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
if (UidFieldMapper.NAME.equals(fieldInfo.name)) {
uid = Uid.createUid(value);
} else {
addValue(fieldInfo.name, value);
}
}
@Override
public void intField(FieldInfo fieldInfo, int value) throws IOException {
addValue(fieldInfo.name, value);
}
@Override
public void longField(FieldInfo fieldInfo, long value) throws IOException {
addValue(fieldInfo.name, value);
}
@Override
public void floatField(FieldInfo fieldInfo, float value) throws IOException {
addValue(fieldInfo.name, value);
}
@Override
public void doubleField(FieldInfo fieldInfo, double value) throws IOException {
addValue(fieldInfo.name, value);
}
public BytesReference source() {
return source;
}
public Uid uid() {
return uid;
}
public Map<String, List<Object>> fields() {
return fieldsValues != null
? fieldsValues
: ImmutableMap.<String, List<Object>>of();
}
public void reset() {
if (fieldsValues != null) fieldsValues.clear();
source = null;
uid = null;
}
private void addValue(String name, Object value) {
if (fieldsValues == null) {
fieldsValues = newHashMap();
}
List<Object> values = fieldsValues.get(name);
if (values == null) {
values = new ArrayList<Object>(2);
fieldsValues.put(name, values);
}
values.add(value);
}
} | 0true
| src_main_java_org_elasticsearch_index_fieldvisitor_FieldsVisitor.java |
1,699 | runnable = new Runnable() { public void run() { map.set(null, "value"); } }; | 0true
| hazelcast_src_test_java_com_hazelcast_map_BasicMapTest.java |
63 | public interface IntByIntToInt { int apply(int a, int b); } | 0true
| src_main_java_jsr166e_ConcurrentHashMapV8.java |
169 | {
@Override
public boolean matchesSafely( LogEntry.OnePhaseCommit onePC )
{
return onePC != null && onePC.getIdentifier() == identifier && onePC.getTxId() == txId;
}
@Override
public void describeTo( Description description )
{
description.appendText( String.format( "1PC[%d, txId=%d, <Any Date>],", identifier, txId ) );
}
}; | 0true
| community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_LogMatchers.java |
2,285 | public class NoneRecycler<T> extends AbstractRecycler<T> {
public NoneRecycler(C<T> c) {
super(c);
}
@Override
public V<T> obtain(int sizing) {
return new NV<T>(c.newInstance(sizing));
}
@Override
public void close() {
}
public static class NV<T> implements Recycler.V<T> {
T value;
NV(T value) {
this.value = value;
}
@Override
public T v() {
return value;
}
@Override
public boolean isRecycled() {
return false;
}
@Override
public boolean release() {
if (value == null) {
throw new ElasticsearchIllegalStateException("recycler entry already released...");
}
value = null;
return true;
}
}
} | 0true
| src_main_java_org_elasticsearch_common_recycler_NoneRecycler.java |
676 | constructors[COLLECTION_ROLLBACK_BACKUP] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new CollectionRollbackBackupOperation();
}
}; | 0true
| hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java |
2,139 | public class ObjectRecordFactory implements RecordFactory<Object> {
private final SerializationService serializationService;
private final boolean statisticsEnabled;
public ObjectRecordFactory(MapConfig config, SerializationService serializationService) {
this.serializationService = serializationService;
this.statisticsEnabled = config.isStatisticsEnabled();
}
@Override
public InMemoryFormat getStorageFormat() {
return InMemoryFormat.OBJECT;
}
@Override
public Record<Object> newRecord(Data key, Object value) {
Object v = value;
if (value instanceof Data) {
v = serializationService.toObject(value);
}
return statisticsEnabled ? new ObjectRecordWithStats(key, value) : new ObjectRecord(key, v);
}
@Override
public void setValue(Record<Object> record, Object value) {
Object v = value;
if (value instanceof Data) {
v = serializationService.toObject(value);
}
record.setValue(v);
}
@Override
public boolean isEquals(Object value1, Object value2) {
Object v1 = value1 instanceof Data ? serializationService.toObject(value1) : value1;
Object v2 = value2 instanceof Data ? serializationService.toObject(value2) : value2;
if (v1 == null && v2 == null) {
return true;
}
if (v1 == null) {
return false;
}
if (v2 == null) {
return false;
}
return v1.equals(v2);
}
} | 1no label
| hazelcast_src_main_java_com_hazelcast_map_record_ObjectRecordFactory.java |
369 | public class HBaseLockStoreTest extends LockKeyColumnValueStoreTest {
@BeforeClass
public static void startHBase() throws IOException {
HBaseStorageSetup.startHBase();
}
@AfterClass
public static void stopHBase() {
// Workaround for https://issues.apache.org/jira/browse/HBASE-10312
if (VersionInfo.getVersion().startsWith("0.96"))
HBaseStorageSetup.killIfRunning();
}
public KeyColumnValueStoreManager openStorageManager(int idx) throws BackendException {
return new HBaseStoreManager(HBaseStorageSetup.getHBaseConfiguration());
}
} | 0true
| titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_diskstorage_hbase_HBaseLockStoreTest.java |
1,224 | public class WorkflowPaymentContext implements ProcessContext {
public final static long serialVersionUID = 1L;
private boolean stopEntireProcess = false;
private CombinedPaymentContextSeed seedData;
public void setSeedData(Object seedObject) {
this.seedData = (CombinedPaymentContextSeed) seedObject;
}
public boolean stopProcess() {
this.stopEntireProcess = true;
return stopEntireProcess;
}
public boolean isStopped() {
return stopEntireProcess;
}
public CombinedPaymentContextSeed getSeedData() {
return seedData;
}
} | 0true
| core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_payment_service_workflow_WorkflowPaymentContext.java |
149 | public abstract class XaCommand
{
private boolean isRecovered = false;
/**
* Default implementation of rollback that does nothing. This method is not
* to undo any work done by the {@link #execute} method. Commands in a
* {@link XaTransaction} are either all rolled back or all executed, they're
* not linked together as usual execute/rollback methods.
* <p>
* Since a command only is in memory nothing has been made persistent so
 * rollback usually doesn't have to do anything. Sometimes however a command
* needs to acquire resources when created (since the application thinks it
* has done the work when the command is created). For example, if a command
* creates some entity that has a primary id we need to generate that id
* upon command creation. But if the command is rolled back we should
* release that id. This is the place to do just that.
*/
public void rollback()
{
};
/**
* Executes the command and makes it persistent. This method must succeed,
* any protests about this command not being able to execute should be done
* before execution of any command within the transaction.
*/
public abstract void execute();
/**
* When a command is added to a transaction (usually when it is created) it
* must be written to the {@link XaLogicalLog}. This method should write
* all the data that is needed to re-create the command (see
* {@link XaCommandFactory}).
* <p>
* Write the data to the <CODE>fileChannel</CODE>, you can use the
* <CODE>buffer</CODE> supplied or create your own buffer since its capacity
 * is very small (137 bytes or something). Access to writing commands is
* synchronized, only one command will be written at a time so if you need
* to write larger data sets the commands can share the same buffer.
* <p>
* Don't throw an <CODE>IOException</CODE> to imply something is wrong
* with the command. An exception should only be thrown here if there is a
* real IO failure. If something is wrong with this command it should have
* been detected when it was created.
* <p>
* Don't <CODE>force</CODE>, <CODE>position</CODE> or anything except
* normal forward <CODE>write</CODE> with the file channel.
*
* @param fileChannel
* The channel to the {@link XaLogicalLog}
* @param buffer
* A small byte buffer that can be used to write command data
* @throws IOException
* In case of *real* IO failure
*/
public abstract void writeToFile( LogBuffer buffer ) throws IOException;
/**
* If this command is created by the command factory during a recovery scan
* of the logical log this method will be called to mark the command as a
* "recovered command".
*/
protected void setRecovered()
{
isRecovered = true;
}
/**
 * Returns whether or not this is a "recovered command".
*
* @return <CODE>true</CODE> if command was created during a recovery else
* <CODE>false</CODE> is returned
*/
public boolean isRecovered()
{
return isRecovered;
}
} | 0true
| community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_XaCommand.java |
456 | private static final Map<EntryMetaData,Object> metaData = new EntryMetaData.Map() {{
put(EntryMetaData.TIMESTAMP,Long.valueOf(101));
put(EntryMetaData.TTL, 42);
put(EntryMetaData.VISIBILITY,"SOS/K5a-89 SOS/sdf3");
}}; | 0true
| titan-test_src_test_java_com_thinkaurelius_titan_diskstorage_keycolumnvalue_StaticArrayEntryTest.java |
3,880 | public class IndicesQueryParser implements QueryParser {
public static final String NAME = "indices";
@Nullable
private final ClusterService clusterService;
@Inject
public IndicesQueryParser(@Nullable ClusterService clusterService) {
this.clusterService = clusterService;
}
@Override
public String[] names() {
return new String[]{NAME};
}
@Override
public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
Query noMatchQuery = Queries.newMatchAllQuery();
boolean queryFound = false;
boolean indicesFound = false;
boolean currentIndexMatchesIndices = false;
String queryName = null;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
//TODO We are able to decide whether to parse the query or not only if indices in the query appear first
queryFound = true;
if (indicesFound && !currentIndexMatchesIndices) {
parseContext.parser().skipChildren(); // skip the query object without parsing it
} else {
query = parseContext.parseInnerQuery();
}
} else if ("no_match_query".equals(currentFieldName)) {
if (indicesFound && currentIndexMatchesIndices) {
parseContext.parser().skipChildren(); // skip the query object without parsing it
} else {
noMatchQuery = parseContext.parseInnerQuery();
}
} else {
throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("indices".equals(currentFieldName)) {
if (indicesFound) {
throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified");
}
indicesFound = true;
Collection<String> indices = new ArrayList<String>();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
String value = parser.textOrNull();
if (value == null) {
throw new QueryParsingException(parseContext.index(), "[indices] no value specified for 'indices' entry");
}
indices.add(value);
}
currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), indices.toArray(new String[indices.size()]));
} else {
throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("index".equals(currentFieldName)) {
if (indicesFound) {
throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified");
}
indicesFound = true;
currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), parser.text());
} else if ("no_match_query".equals(currentFieldName)) {
String type = parser.text();
if ("all".equals(type)) {
noMatchQuery = Queries.newMatchAllQuery();
} else if ("none".equals(type)) {
noMatchQuery = Queries.newMatchNoDocsQuery();
}
} else if ("_name".equals(currentFieldName)) {
queryName = parser.text();
} else {
throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound) {
throw new QueryParsingException(parseContext.index(), "[indices] requires 'query' element");
}
if (!indicesFound) {
throw new QueryParsingException(parseContext.index(), "[indices] requires 'indices' or 'index' element");
}
Query chosenQuery;
if (currentIndexMatchesIndices) {
chosenQuery = query;
} else {
chosenQuery = noMatchQuery;
}
if (queryName != null) {
parseContext.addNamedQuery(queryName, chosenQuery);
}
return chosenQuery;
}
protected boolean matchesIndices(String currentIndex, String... indices) {
final String[] concreteIndices = clusterService.state().metaData().concreteIndicesIgnoreMissing(indices);
for (String index : concreteIndices) {
if (Regex.simpleMatch(index, currentIndex)) {
return true;
}
}
return false;
}
} | 1no label
| src_main_java_org_elasticsearch_index_query_IndicesQueryParser.java |
1,481 | public class OSQLFunctionOutV extends OSQLFunctionMove {
public static final String NAME = "outV";
public OSQLFunctionOutV() {
super(NAME, 0, 1);
}
@Override
protected Object move(final OrientBaseGraph graph, final OIdentifiable iRecord, final String[] iLabels) {
return e2v(graph, iRecord, Direction.OUT, iLabels);
}
} | 1no label
| graphdb_src_main_java_com_orientechnologies_orient_graph_sql_functions_OSQLFunctionOutV.java |
2,469 | executor.execute(new Runnable() {
@Override
public void run() {
try {
block.await();
} catch (InterruptedException e) {
fail();
}
}
@Override
public String toString() {
return "the blocking";
}
}); | 0true
| src_test_java_org_elasticsearch_common_util_concurrent_PrioritizedExecutorsTests.java |
1,901 | public interface SpawnModules {
Iterable<? extends Module> spawnModules();
} | 0true
| src_main_java_org_elasticsearch_common_inject_SpawnModules.java |
8 | public class LabelAbbreviationsTest {
@Test
public void getAbbreviation() throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
AbbreviationsImpl availableAbbreviations = new AbbreviationsImpl("value");
availableAbbreviations.addPhrase("Amps", Collections.singletonList("A"));
availableAbbreviations.addPhrase("BCA1", Collections.<String>emptyList());
availableAbbreviations.addPhrase("Ch1", Collections.<String>emptyList());
availableAbbreviations.addPhrase("Serial", Collections.<String>emptyList());
AbbreviationSettings aSettings = new AbbreviationSettings("fullLabel", availableAbbreviations, new LabelAbbreviations());
String abbreviatedLabel = aSettings.getAbbreviatedLabel();
Assert.assertEquals(abbreviatedLabel, "Amps BCA1 Ch1 Serial");
LabelAbbreviations available2 = aSettings.getAbbreviations();
Assert.assertEquals(available2.getAbbreviation("BCA1"), "BCA1");
Assert.assertEquals(available2.getAbbreviation("Amps"), "Amps");
// Change the state of the control panel via currentAbbreviations
LabelAbbreviations currentAbbreviations = new LabelAbbreviations();
currentAbbreviations.addAbbreviation("Amps", "A | a | Amp");
currentAbbreviations.addAbbreviation("BCA1", "B | bca1");
currentAbbreviations.addAbbreviation("CAT", "C");
currentAbbreviations.addAbbreviation("DOG", "D");
currentAbbreviations.addAbbreviation("Ace", "ace");
currentAbbreviations.addAbbreviation("Abb", "a");
currentAbbreviations.addAbbreviation("Rabbit", "R");
AbbreviationSettings a2Settings = new AbbreviationSettings("fullLabel", availableAbbreviations, currentAbbreviations);
LabelAbbreviations available2afterSelect = a2Settings.getAbbreviations();
Assert.assertEquals(available2afterSelect.getAbbreviation("BCA1"), "B | bca1");
Assert.assertEquals(available2afterSelect.getAbbreviation("Amps"), "A | a | Amp");
Map<String, String> map = getAbbreviations(currentAbbreviations);
Assert.assertEquals(map.size(), 7);
}
private Map<String, String> getAbbreviations(
LabelAbbreviations currentAbbreviations) throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
Field f = currentAbbreviations.getClass().getDeclaredField("abbreviations"); //NoSuchFieldException
f.setAccessible(true);
@SuppressWarnings("unchecked")
Map<String, String> map = (HashMap<String,String>) f.get(currentAbbreviations); //IllegalAccessException
return map;
}
} | 0true
| tableViews_src_test_java_gov_nasa_arc_mct_abbreviation_impl_LabelAbbreviationsTest.java |
137 | @Test
public class ByteSerializerTest {
private static final int FIELD_SIZE = 1;
private static final Byte OBJECT = 1;
private OByteSerializer byteSerializer;
byte[] stream = new byte[FIELD_SIZE];
@BeforeClass
public void beforeClass() {
byteSerializer = new OByteSerializer();
}
public void testFieldSize() {
Assert.assertEquals(byteSerializer.getObjectSize(null), FIELD_SIZE);
}
public void testSerialize() {
byteSerializer.serialize(OBJECT, stream, 0);
Assert.assertEquals(byteSerializer.deserialize(stream, 0), OBJECT);
}
public void testSerializeNative() {
byteSerializer.serializeNative(OBJECT, stream, 0);
Assert.assertEquals(byteSerializer.deserializeNative(stream, 0), OBJECT);
}
public void testNativeDirectMemoryCompatibility() {
byteSerializer.serializeNative(OBJECT, stream, 0);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(byteSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
} finally {
pointer.free();
}
}
} | 0true
| commons_src_test_java_com_orientechnologies_common_serialization_types_ByteSerializerTest.java |
1,334 | @ClusterScope(scope = SUITE)
public class AckTests extends ElasticsearchIntegrationTest {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
//to test that the acknowledgement mechanism is working we better disable the wait for publish
//otherwise the operation is most likely acknowledged even if it doesn't support ack
return ImmutableSettings.builder().put("discovery.zen.publish_timeout", 0).build();
}
@Test
public void testUpdateSettingsAcknowledgement() {
createIndex("test");
assertAcked(client().admin().indices().prepareUpdateSettings("test")
.setSettings(ImmutableSettings.builder().put("refresh_interval", 9999)));
for (Client client : clients()) {
String refreshInterval = getLocalClusterState(client).metaData().index("test").settings().get("index.refresh_interval");
assertThat(refreshInterval, equalTo("9999"));
}
}
@Test
public void testUpdateSettingsNoAcknowledgement() {
createIndex("test");
UpdateSettingsResponse updateSettingsResponse = client().admin().indices().prepareUpdateSettings("test").setTimeout("0s")
.setSettings(ImmutableSettings.builder().put("refresh_interval", 9999)).get();
assertThat(updateSettingsResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testPutWarmerAcknowledgement() {
createIndex("test");
ensureGreen();
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
for (Client client : clients()) {
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
assertThat(getWarmersResponse.warmers().size(), equalTo(1));
ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
assertThat(entry.key, equalTo("test"));
assertThat(entry.value.size(), equalTo(1));
assertThat(entry.value.get(0).name(), equalTo("custom_warmer"));
}
}
@Test
public void testPutWarmerNoAcknowledgement() {
createIndex("test");
ensureGreen();
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
.get();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testDeleteWarmerAcknowledgement() {
createIndex("test");
ensureGreen();
assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
for (Client client : clients()) {
GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
assertThat(getWarmersResponse.warmers().size(), equalTo(0));
}
}
@Test
public void testDeleteWarmerNoAcknowledgement() {
createIndex("test");
ensureGreen();
PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
.setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
.get();
assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testDeleteMappingAcknowledgement() {
client().admin().indices().prepareCreate("test")
.addMapping("type1", "field1", "type=string").get();
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "value1");
GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").addTypes("type1").get();
assertThat(getMappingsResponse.mappings().get("test").get("type1"), notNullValue());
assertAcked(client().admin().indices().prepareDeleteMapping("test").setType("type1"));
for (Client client : clients()) {
getMappingsResponse = client.admin().indices().prepareGetMappings("test").addTypes("type1").setLocal(true).get();
assertThat(getMappingsResponse.mappings().size(), equalTo(0));
}
}
@Test
public void testDeleteMappingNoAcknowledgement() {
client().admin().indices().prepareCreate("test")
.addMapping("type1", "field1", "type=string").get();
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "value1");
DeleteMappingResponse deleteMappingResponse = client().admin().indices().prepareDeleteMapping("test").setTimeout("0s").setType("type1").get();
assertThat(deleteMappingResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testClusterRerouteAcknowledgement() throws InterruptedException {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put("number_of_shards", atLeast(cluster().size()))
.put("number_of_replicas", 0)).get();
ensureGreen();
MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
assertAcked(client().admin().cluster().prepareReroute().add(moveAllocationCommand));
for (Client client : clients()) {
ClusterState clusterState = getLocalClusterState(client);
for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
//if the shard that we wanted to move is still on the same node, it must be relocating
if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
assertThat(mutableShardRouting.relocating(), equalTo(true));
}
}
boolean found = false;
for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
assertThat(mutableShardRouting.state(), anyOf(equalTo(ShardRoutingState.INITIALIZING), equalTo(ShardRoutingState.STARTED)));
found = true;
break;
}
}
assertThat(found, equalTo(true));
}
//let's wait for the relocation to be completed, otherwise there can be issues with after test checks (mock directory wrapper etc.)
waitForRelocation();
}
@Test
public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put("number_of_shards", atLeast(cluster().size()))
.put("number_of_replicas", 0)).get();
ensureGreen();
MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").add(moveAllocationCommand).get();
assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put("number_of_shards", atLeast(cluster().size()))
.put("number_of_replicas", 0)).get();
ensureGreen();
MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
assertAcked(client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand));
//testing only on master with the latest cluster state as we didn't make any change thus we cannot guarantee that
//all nodes hold the same cluster state version. We only know there was no need to change anything, thus no need for ack on this update.
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
boolean found = false;
for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
//the shard that we wanted to move is still on the same node, as we had dryRun flag
if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
assertThat(mutableShardRouting.started(), equalTo(true));
found = true;
break;
}
}
assertThat(found, equalTo(true));
for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
fail("shard [" + mutableShardRouting + "] shouldn't be on node [" + moveAllocationCommand.toString() + "]");
}
}
}
@Test
public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put("number_of_shards", atLeast(cluster().size()))
.put("number_of_replicas", 0)).get();
ensureGreen();
MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").setDryRun(true).add(moveAllocationCommand).get();
//acknowledged anyway as no changes were made
assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(true));
}
private MoveAllocationCommand getAllocationCommand() {
String fromNodeId = null;
String toNodeId = null;
MutableShardRouting shardToBeMoved = null;
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
for (RoutingNode routingNode : clusterStateResponse.getState().routingNodes()) {
if (routingNode.node().isDataNode()) {
if (fromNodeId == null && routingNode.numberOfOwningShards() > 0) {
fromNodeId = routingNode.nodeId();
shardToBeMoved = routingNode.get(randomInt(routingNode.size() - 1));
} else {
toNodeId = routingNode.nodeId();
}
if (toNodeId != null && fromNodeId != null) {
break;
}
}
}
assertNotNull(fromNodeId);
assertNotNull(toNodeId);
assertNotNull(shardToBeMoved);
logger.info("==> going to move shard [{}] from [{}] to [{}]", shardToBeMoved, fromNodeId, toNodeId);
return new MoveAllocationCommand(shardToBeMoved.shardId(), fromNodeId, toNodeId);
}
@Test
public void testIndicesAliasesAcknowledgement() {
createIndex("test");
//testing acknowledgement when trying to submit an existing alias too
//in that case it would not make any change, but we are sure about the cluster state
//as the previous operation was acknowledged
for (int i = 0; i < 2; i++) {
assertAcked(client().admin().indices().prepareAliases().addAlias("test", "alias"));
for (Client client : clients()) {
AliasMetaData aliasMetaData = getLocalClusterState(client).metaData().aliases().get("alias").get("test");
assertThat(aliasMetaData.alias(), equalTo("alias"));
}
}
}
@Test
public void testIndicesAliasesNoAcknowledgement() {
createIndex("test");
IndicesAliasesResponse indicesAliasesResponse = client().admin().indices().prepareAliases().addAlias("test", "alias").setTimeout("0s").get();
assertThat(indicesAliasesResponse.isAcknowledged(), equalTo(false));
}
public void testCloseIndexAcknowledgement() {
createIndex("test");
ensureGreen();
assertAcked(client().admin().indices().prepareClose("test"));
for (Client client : clients()) {
IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.CLOSE));
}
}
@Test
public void testCloseIndexNoAcknowledgement() {
createIndex("test");
ensureGreen();
CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").setTimeout("0s").get();
assertThat(closeIndexResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testOpenIndexAcknowledgement() {
createIndex("test");
ensureGreen();
assertAcked(client().admin().indices().prepareClose("test"));
assertAcked(client().admin().indices().prepareOpen("test"));
for (Client client : clients()) {
IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.OPEN));
}
}
@Test
public void testOpenIndexNoAcknowledgement() {
createIndex("test");
ensureGreen();
CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet();
assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").setTimeout("0s").get();
assertThat(openIndexResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testPutMappingAcknowledgement() {
createIndex("test");
ensureGreen();
assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed"));
for (Client client : clients()) {
assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue());
}
}
@Test
public void testPutMappingNoAcknowledgement() {
createIndex("test");
ensureGreen();
PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed").setTimeout("0s").get();
assertThat(putMappingResponse.isAcknowledged(), equalTo(false));
}
@Test
public void testCreateIndexAcknowledgement() {
createIndex("test");
for (Client client : clients()) {
assertThat(getLocalClusterState(client).metaData().indices().containsKey("test"), equalTo(true));
}
//let's wait for green, otherwise there can be issues with after test checks (mock directory wrapper etc.)
//but we do want to check that the new index is on all nodes cluster state even before green
ensureGreen();
}
@Test
public void testCreateIndexNoAcknowledgement() {
CreateIndexResponse createIndexResponse = client().admin().indices().prepareCreate("test").setTimeout("0s").get();
assertThat(createIndexResponse.isAcknowledged(), equalTo(false));
//let's wait for green, otherwise there can be issues with after test checks (mock directory wrapper etc.)
ensureGreen();
}
private static ClusterState getLocalClusterState(Client client) {
return client.admin().cluster().prepareState().setLocal(true).get().getState();
}
} | 0true
| src_test_java_org_elasticsearch_cluster_ack_AckTests.java |
620 | public abstract class OIndexOneValue extends OIndexAbstract<OIdentifiable> {
public OIndexOneValue(final String type, String algorithm, OIndexEngine<OIdentifiable> engine, String valueContainerAlgorithm) {
super(type, algorithm, engine, valueContainerAlgorithm);
}
public OIdentifiable get(Object iKey) {
checkForRebuild();
iKey = getCollatingValue(iKey);
acquireSharedLock();
try {
return indexEngine.get(iKey);
} finally {
releaseSharedLock();
}
}
public long count(Object iKey) {
checkForRebuild();
iKey = getCollatingValue(iKey);
acquireSharedLock();
try {
return indexEngine.contains(iKey) ? 1 : 0;
} finally {
releaseSharedLock();
}
}
@Override
public void checkEntry(final OIdentifiable iRecord, Object key) {
checkForRebuild();
key = getCollatingValue(key);
// CHECK IF ALREADY EXIST
final OIdentifiable indexedRID = get(key);
if (indexedRID != null && !indexedRID.getIdentity().equals(iRecord.getIdentity())) {
// CHECK IF IN THE SAME TX THE ENTRY WAS DELETED
String storageType = getDatabase().getStorage().getType();
if (storageType.equals(OEngineMemory.NAME) || storageType.equals(OEngineLocal.NAME)) {
final OTransactionIndexChanges indexChanges = ODatabaseRecordThreadLocal.INSTANCE.get().getTransaction()
.getIndexChanges(getName());
if (indexChanges != null) {
final OTransactionIndexChangesPerKey keyChanges = indexChanges.getChangesPerKey(key);
if (keyChanges != null) {
for (OTransactionIndexChangesPerKey.OTransactionIndexEntry entry : keyChanges.entries) {
if (entry.operation == OTransactionIndexChanges.OPERATION.REMOVE)
// WAS DELETED, OK!
return;
}
}
}
}
OLogManager.instance().exception(
"Cannot index record %s: found duplicated key '%s' in index '%s' previously assigned to the record %s", null,
OIndexException.class, key, iRecord, indexedRID);
}
}
public OIndexOneValue create(final String name, final OIndexDefinition indexDefinition, final String clusterIndexName,
final Set<String> clustersToIndex, boolean rebuild, final OProgressListener progressListener) {
return (OIndexOneValue) super.create(name, indexDefinition, clusterIndexName, clustersToIndex, rebuild, progressListener,
OStreamSerializerRID.INSTANCE);
}
public void getValuesBetween(Object iRangeFrom, final boolean iFromInclusive, Object iRangeTo, final boolean iToInclusive,
final IndexValuesResultListener resultListener) {
checkForRebuild();
if (iRangeFrom.getClass() != iRangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
iRangeFrom = getCollatingValue(iRangeFrom);
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
indexEngine.getValuesBetween(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive, null,
new OIndexEngine.ValuesResultListener() {
@Override
public boolean addResult(OIdentifiable identifiable) {
return resultListener.addResult(identifiable);
}
});
} finally {
releaseSharedLock();
}
}
public void getValuesMajor(Object iRangeFrom, final boolean isInclusive, final IndexValuesResultListener resultListener) {
checkForRebuild();
iRangeFrom = getCollatingValue(iRangeFrom);
acquireSharedLock();
try {
indexEngine.getValuesMajor(iRangeFrom, isInclusive, null, new OIndexEngine.ValuesResultListener() {
@Override
public boolean addResult(OIdentifiable identifiable) {
return resultListener.addResult(identifiable);
}
});
} finally {
releaseSharedLock();
}
}
public void getValuesMinor(Object iRangeTo, final boolean isInclusive, final IndexValuesResultListener resultListener) {
checkForRebuild();
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
indexEngine.getValuesMinor(iRangeTo, isInclusive, null, new OIndexEngine.ValuesResultListener() {
@Override
public boolean addResult(OIdentifiable identifiable) {
return resultListener.addResult(identifiable);
}
});
} finally {
releaseSharedLock();
}
}
public void getValues(final Collection<?> keys, final IndexValuesResultListener resultListener) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(keys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
for (Object key : sortedKeys) {
key = getCollatingValue(key);
final OIdentifiable val = indexEngine.get(key);
if (val != null) {
if (!resultListener.addResult(val))
return;
}
}
} finally {
releaseSharedLock();
}
}
public void getEntriesMajor(Object iRangeFrom, final boolean isInclusive, final IndexEntriesResultListener entriesResultListener) {
checkForRebuild();
iRangeFrom = getCollatingValue(iRangeFrom);
acquireSharedLock();
try {
indexEngine.getEntriesMajor(iRangeFrom, isInclusive, null, new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return entriesResultListener.addResult(entry);
}
});
} finally {
releaseSharedLock();
}
}
public void getEntriesMinor(Object iRangeTo, final boolean isInclusive, final IndexEntriesResultListener entriesResultListener) {
checkForRebuild();
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
indexEngine.getEntriesMinor(iRangeTo, isInclusive, null, new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return entriesResultListener.addResult(entry);
}
});
} finally {
releaseSharedLock();
}
}
public void getEntriesBetween(Object iRangeFrom, Object iRangeTo, final boolean inclusive,
final IndexEntriesResultListener entriesResultListener) {
checkForRebuild();
if (iRangeFrom.getClass() != iRangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
iRangeFrom = getCollatingValue(iRangeFrom);
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
indexEngine.getEntriesBetween(iRangeFrom, iRangeTo, inclusive, null, new OIndexEngine.EntriesResultListener() {
@Override
public boolean addResult(ODocument entry) {
return entriesResultListener.addResult(entry);
}
});
} finally {
releaseSharedLock();
}
}
public void getEntries(final Collection<?> keys, IndexEntriesResultListener resultListener) {
checkForRebuild();
final List<Object> sortedKeys = new ArrayList<Object>(keys);
Collections.sort(sortedKeys, ODefaultComparator.INSTANCE);
acquireSharedLock();
try {
for (Object key : sortedKeys) {
key = getCollatingValue(key);
final OIdentifiable val = indexEngine.get(key);
if (val != null) {
final ODocument document = new ODocument();
document.field("key", key);
document.field("rid", val.getIdentity());
document.unsetDirty();
if (!resultListener.addResult(document))
return;
}
}
} finally {
releaseSharedLock();
}
}
public long getSize() {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.size(null);
} finally {
releaseExclusiveLock();
}
}
public long count(Object iRangeFrom, final boolean iFromInclusive, Object iRangeTo, final boolean iToInclusive,
final int maxValuesToFetch) {
checkForRebuild();
if (iRangeFrom != null && iRangeTo != null && iRangeFrom.getClass() != iRangeTo.getClass())
throw new IllegalArgumentException("Range from-to parameters are of different types");
iRangeFrom = getCollatingValue(iRangeFrom);
iRangeTo = getCollatingValue(iRangeTo);
acquireSharedLock();
try {
return indexEngine.count(iRangeFrom, iFromInclusive, iRangeTo, iToInclusive, maxValuesToFetch, null);
} finally {
releaseSharedLock();
}
}
public long getKeySize() {
checkForRebuild();
acquireExclusiveLock();
try {
return indexEngine.size(null);
} finally {
releaseExclusiveLock();
}
}
public Iterator<OIdentifiable> valuesIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, indexEngine.valuesIterator());
} finally {
releaseSharedLock();
}
}
public Iterator<OIdentifiable> valuesInverseIterator() {
checkForRebuild();
acquireSharedLock();
try {
return new OSharedResourceIterator<OIdentifiable>(this, indexEngine.inverseValuesIterator());
} finally {
releaseSharedLock();
}
}
} | 1no label
| core_src_main_java_com_orientechnologies_orient_core_index_OIndexOneValue.java |
71 | public interface StaticAssetStorageDao {
StaticAssetStorage create();
StaticAssetStorage readStaticAssetStorageById(Long id);
public StaticAssetStorage readStaticAssetStorageByStaticAssetId(Long id);
StaticAssetStorage save(StaticAssetStorage assetStorage);
void delete(StaticAssetStorage assetStorage);
public Blob createBlob(MultipartFile uploadedFile) throws IOException;
} | 0true
| admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_dao_StaticAssetStorageDao.java |
41 | static final class ModuleDescriptorProposal extends CompletionProposal {
ModuleDescriptorProposal(int offset, String prefix, String moduleName) {
super(offset, prefix, MODULE,
"module " + moduleName,
"module " + moduleName + " \"1.0.0\" {}");
}
@Override
public Point getSelection(IDocument document) {
return new Point(offset - prefix.length() + text.indexOf('\"')+1, 5);
}
@Override
protected boolean qualifiedNameIsPath() {
return true;
}
} | 0true
| plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_complete_ModuleCompletions.java |