Unnamed: 0 (int64, 0–6.45k) | func (string, length 37–161k) | target (class label, 2 classes) | project (string, length 33–167)
---|---|---|---|
228 |
@Repository("blModuleConfigurationDao")
public class ModuleConfigurationDaoImpl implements ModuleConfigurationDao {
@PersistenceContext(unitName = "blPU")
protected EntityManager em;
@Resource(name = "blEntityConfiguration")
protected EntityConfiguration entityConfiguration;
protected Long currentDateResolution = 10000L;
protected Date cachedDate = SystemTime.asDate();
protected Date getCurrentDateAfterFactoringInDateResolution() {
Date returnDate = SystemTime.getCurrentDateWithinTimeResolution(cachedDate, currentDateResolution);
if (returnDate != cachedDate) {
if (SystemTime.shouldCacheDate()) {
cachedDate = returnDate;
}
}
return returnDate;
}
@Override
public ModuleConfiguration readById(Long id) {
return em.find(AbstractModuleConfiguration.class, id);
}
@Override
public ModuleConfiguration save(ModuleConfiguration config) {
if (config.getIsDefault()) {
Query batchUpdate = em.createNamedQuery("BC_BATCH_UPDATE_MODULE_CONFIG_DEFAULT");
batchUpdate.setParameter("configType", config.getModuleConfigurationType().getType());
batchUpdate.executeUpdate();
}
return em.merge(config);
}
@Override
public void delete(ModuleConfiguration config) {
((Status) config).setArchived('Y');
em.merge(config);
}
@SuppressWarnings("unchecked")
@Override
public List<ModuleConfiguration> readAllByType(ModuleConfigurationType type) {
Query query = em.createNamedQuery("BC_READ_MODULE_CONFIG_BY_TYPE");
query.setParameter("configType", type.getType());
query.setHint(QueryHints.CACHEABLE, true);
return query.getResultList();
}
@SuppressWarnings("unchecked")
@Override
public List<ModuleConfiguration> readActiveByType(ModuleConfigurationType type) {
Query query = em.createNamedQuery("BC_READ_ACTIVE_MODULE_CONFIG_BY_TYPE");
query.setParameter("configType", type.getType());
Date myDate = getCurrentDateAfterFactoringInDateResolution();
query.setParameter("currentDate", myDate);
query.setHint(QueryHints.CACHEABLE, true);
return query.getResultList();
}
@SuppressWarnings("unchecked")
@Override
public List<ModuleConfiguration> readByType(Class<? extends ModuleConfiguration> type) {
//TODO change this to a JPA criteria expression
Query query = em.createQuery("SELECT config FROM " + type.getName() + " config");
query.setHint(QueryHints.CACHEABLE, true);
return query.getResultList();
}
@Override
public Long getCurrentDateResolution() {
return currentDateResolution;
}
@Override
public void setCurrentDateResolution(Long currentDateResolution) {
this.currentDateResolution = currentDateResolution;
}
}
| 0 (true) | common_src_main_java_org_broadleafcommerce_common_config_dao_ModuleConfigurationDaoImpl.java |
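A note on the date-resolution trick in getCurrentDateAfterFactoringInDateResolution above: rounding "now" into a coarse bucket keeps the :currentDate query parameter stable across calls, so the CACHEABLE hint can actually produce query-cache hits. A minimal sketch of the same idea, independent of Broadleaf's SystemTime (all names below are illustrative):
import java.util.Date;
// Bucket the current time so repeated queries within the same resolution
// window pass an identical parameter value and can share a cached result.
final class DateResolutionSketch {
    static Date withinResolution(long nowMillis, long resolutionMillis) {
        // truncate to the start of the current bucket, e.g. 10s buckets
        return new Date(nowMillis - (nowMillis % resolutionMillis));
    }
    public static void main(String[] args) {
        long resolution = 10000L; // matches the DAO's 10s default
        Date a = withinResolution(System.currentTimeMillis(), resolution);
        Date b = withinResolution(System.currentTimeMillis(), resolution);
        System.out.println(a.equals(b)); // true within one bucket
    }
}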
51 |
public class QueryException extends TitanException {
private static final long serialVersionUID = 1L;
/**
* @param msg Exception message
*/
public QueryException(String msg) {
super(msg);
}
/**
* @param msg Exception message
* @param cause Cause of the exception
*/
public QueryException(String msg, Throwable cause) {
super(msg, cause);
}
/**
* Constructs an exception with a generic message
*
* @param cause Cause of the exception
*/
public QueryException(Throwable cause) {
this("Exception in query.", cause);
}
}
| 0 (true) | titan-core_src_main_java_com_thinkaurelius_titan_core_QueryException.java |
780 |
public class OMemoryWatchDog extends Thread {
private final Map<ListenerWrapper, Object> listeners = new WeakHashMap<ListenerWrapper, Object>(128);
private static long lastGC = 0;
private int alertTimes = 0;
protected final ReferenceQueue<Object> monitorQueue = new ReferenceQueue<Object>();
protected SoftReference<Object> monitorRef = new SoftReference<Object>(new Object(), monitorQueue);
/**
* we want properties of both IdentityHashMap and WeakHashMap
*/
private static class ListenerWrapper {
final Listener listener;
private ListenerWrapper(Listener listener) {
this.listener = listener;
}
@Override
public boolean equals(final Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
final ListenerWrapper that = (ListenerWrapper) o;
return listener == that.listener;
}
@Override
public int hashCode() {
return listener != null ? System.identityHashCode(listener) : 0;
}
}
public static interface Listener {
/**
* Executes a soft free of memory resources.
*
* @param iFreeMemory
* currently free memory in bytes
* @param iFreeMemoryPercentage
* free memory as a percentage of the maximum heap
*/
public void memoryUsageLow(long iFreeMemory, long iFreeMemoryPercentage);
}
/**
* Creates the memory watch dog with the default memory threshold.
*/
public OMemoryWatchDog() {
super("OrientDB MemoryWatchDog");
setDaemon(true);
start();
}
public void run() {
Orient
.instance()
.getProfiler()
.registerHookValue("system.memory.alerts", "Number of alerts received by JVM to free memory resources",
METRIC_TYPE.COUNTER, new OProfilerHookValue() {
public Object getValue() {
return alertTimes;
}
});
Orient
.instance()
.getProfiler()
.registerHookValue("system.memory.lastGC", "Date of last System.gc() invocation", METRIC_TYPE.STAT,
new OProfilerHookValue() {
public Object getValue() {
return lastGC;
}
});
while (true) {
try {
// WAITS FOR THE GC FREE
monitorQueue.remove();
if (Thread.interrupted())
break;
// GC is freeing memory!
alertTimes++;
long maxMemory = Runtime.getRuntime().maxMemory();
long freeMemory = Runtime.getRuntime().freeMemory();
int freeMemoryPer = (int) (freeMemory * 100 / maxMemory);
if (OLogManager.instance().isDebugEnabled())
OLogManager.instance().debug(this, "Free memory is low %s of %s (%d%%), calling listeners to free memory...",
OFileUtils.getSizeAsString(freeMemory), OFileUtils.getSizeAsString(maxMemory), freeMemoryPer);
final long timer = Orient.instance().getProfiler().startChrono();
synchronized (listeners) {
for (ListenerWrapper listener : listeners.keySet()) {
try {
listener.listener.memoryUsageLow(freeMemory, freeMemoryPer);
} catch (Exception e) {
e.printStackTrace();
}
}
}
Orient.instance().getProfiler().stopChrono("OMemoryWatchDog.freeResources", "WatchDog free resources", timer);
} catch (InterruptedException e) {
break;
} catch (Exception e) {
} finally {
// RE-INSTANTIATE THE MONITOR REF
monitorRef = new SoftReference<Object>(new Object(), monitorQueue);
}
}
OLogManager.instance().debug(this, "[OMemoryWatchDog] shutting down...");
synchronized (listeners) {
listeners.clear();
}
monitorRef = null;
}
public Listener addListener(final Listener listener) {
synchronized (listeners) {
listeners.put(new ListenerWrapper(listener), listener);
}
return listener;
}
public boolean removeListener(final Listener listener) {
synchronized (listeners) {
return listeners.remove(new ListenerWrapper(listener)) != null;
}
}
public List<Listener> getListeners() {
synchronized (listeners) {
List<Listener> listenerList = new ArrayList<Listener>();
for (ListenerWrapper wrapper : listeners.keySet()) {
listenerList.add(wrapper.listener);
}
return listenerList;
}
}
public static void freeMemoryForOptimization(final long iDelayTime) {
freeMemory(iDelayTime, OGlobalConfiguration.JVM_GC_DELAY_FOR_OPTIMIZE.getValueAsLong());
}
public static void freeMemoryForResourceCleanup(final long iDelayTime) {
freeMemory(iDelayTime, 0);
}
private static void freeMemory(final long iDelayTime, final long minimalTimeAmount) {
final long dateLastGC = System.currentTimeMillis();
if (dateLastGC - lastGC > minimalTimeAmount * 1000) {
lastGC = dateLastGC;
System.gc();
if (iDelayTime > 0)
try {
Thread.sleep(iDelayTime);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
}
| 1 (no label) | core_src_main_java_com_orientechnologies_orient_core_memory_OMemoryWatchDog.java |
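The watchdog's core mechanism is worth isolating: a SoftReference registered with a ReferenceQueue is only enqueued when the collector clears it under memory pressure, so blocking on the queue amounts to "wake me when a GC frees soft references". A standalone sketch of just that mechanism, mirroring OMemoryWatchDog's main loop:
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
// Block until the GC clears a soft reference, then re-arm the monitor.
public class GcSignalSketch {
    public static void main(String[] args) throws InterruptedException {
        ReferenceQueue<Object> queue = new ReferenceQueue<Object>();
        SoftReference<Object> monitor =
                new SoftReference<Object>(new Object(), queue);
        while (true) {
            queue.remove(); // parks until the referent is collected
            System.out.println("GC cleared soft references");
            monitor = new SoftReference<Object>(new Object(), queue); // re-arm
        }
    }
}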
367 |
public static class TestReducerFactory
implements ReducerFactory<String, Integer, Integer> {
public TestReducerFactory() {
}
@Override
public Reducer<String, Integer, Integer> newReducer(String key) {
return new TestReducer();
}
}
| 0 (true) | hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java |
267 |
public class OCommandGenericIterator implements Iterator<Object>, Iterable<Object> {
protected OCommandExecutor command;
protected Iterator<Object> resultSet;
protected Object resultOne;
protected boolean executed = false;
public OCommandGenericIterator(OCommandExecutor command) {
this.command = command;
}
public boolean hasNext() {
checkForExecution();
if (resultOne != null)
return true;
else if (resultSet != null)
return resultSet.hasNext();
return false;
}
public Object next() {
checkForExecution();
if (resultOne != null) {
// consume the single result so a hasNext()/next() loop terminates
final Object result = resultOne;
resultOne = null;
return result;
} else if (resultSet != null)
return resultSet.next();
return null;
}
public Iterator<Object> iterator() {
return this;
}
public void remove() {
throw new UnsupportedOperationException("remove()");
}
@SuppressWarnings("unchecked")
protected void checkForExecution() {
if (!executed) {
executed = true;
final Object result = command.execute(null);
if (result instanceof Collection)
resultSet = ((Collection<Object>) result).iterator();
else if (result != null) // any non-null, non-collection result is a single value
resultOne = result;
}
}
}
| 0 (true) | core_src_main_java_com_orientechnologies_orient_core_command_OCommandGenericIterator.java |
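The class above is an execute-on-first-access iterator: nothing runs until hasNext() or next() is first called. The same lazy pattern in a self-contained form, with a plain Supplier standing in for the OCommandExecutor (names here are illustrative, not OrientDB API):
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.function.Supplier;
// Defer command execution until the first hasNext()/next() call.
class LazyIterator<T> implements Iterator<T> {
    private final Supplier<Collection<T>> command;
    private Iterator<T> delegate;
    LazyIterator(Supplier<Collection<T>> command) { this.command = command; }
    private void ensureExecuted() {
        if (delegate == null) delegate = command.get().iterator();
    }
    public boolean hasNext() { ensureExecuted(); return delegate.hasNext(); }
    public T next()          { ensureExecuted(); return delegate.next(); }
    public static void main(String[] args) {
        Iterator<String> it = new LazyIterator<>(() -> {
            System.out.println("executing command"); // runs once, lazily
            return Arrays.asList("a", "b");
        });
        while (it.hasNext()) System.out.println(it.next());
    }
}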
362 |
public class PropertyFilter extends Filter {
protected boolean isJoinTableFilter = false;
protected String propertyName;
public Boolean getJoinTableFilter() {
return isJoinTableFilter;
}
public void setJoinTableFilter(Boolean joinTableFilter) {
isJoinTableFilter = joinTableFilter;
}
public String getPropertyName() {
return propertyName;
}
public void setPropertyName(String propertyName) {
this.propertyName = propertyName;
}
}
| 0 (true) | common_src_main_java_org_broadleafcommerce_common_filter_PropertyFilter.java |
85 |
@Entity
@Inheritance(strategy = InheritanceType.JOINED)
@Table(name = "BLC_STATIC_ASSET_STRG")
public class StaticAssetStorageImpl implements StaticAssetStorage {
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(generator = "StaticAssetStorageId")
@GenericGenerator(
name="StaticAssetStorageId",
strategy="org.broadleafcommerce.common.persistence.IdOverrideTableGenerator",
parameters = {
@Parameter(name="segment_value", value="StaticAssetStorageImpl"),
@Parameter(name="entity_name", value="org.broadleafcommerce.cms.file.domain.StaticAssetStorageImpl")
}
)
@Column(name = "STATIC_ASSET_STRG_ID")
protected Long id;
@Column(name ="STATIC_ASSET_ID", nullable = false)
@Index(name="STATIC_ASSET_ID_INDEX", columnNames={"STATIC_ASSET_ID"})
protected Long staticAssetId;
@Column (name = "FILE_DATA", length = Integer.MAX_VALUE - 1)
@Lob
protected Blob fileData;
@Override
public Long getId() {
return id;
}
@Override
public void setId(Long id) {
this.id = id;
}
@Override
public Blob getFileData() {
return fileData;
}
@Override
public void setFileData(Blob fileData) {
this.fileData = fileData;
}
@Override
public Long getStaticAssetId() {
return staticAssetId;
}
@Override
public void setStaticAssetId(Long staticAssetId) {
this.staticAssetId = staticAssetId;
}
}
| 0 (true) | admin_broadleaf-contentmanagement-module_src_main_java_org_broadleafcommerce_cms_file_domain_StaticAssetStorageImpl.java |
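Populating a @Lob Blob column like FILE_DATA is usually done from bytes or a stream. A hedged sketch using the JDK's SerialBlob (real code would more likely stream through Hibernate's LobHelper to avoid materializing large files in memory; this is only an illustration):
import java.sql.Blob;
import javax.sql.rowset.serial.SerialBlob;
// Build a Blob for fileData from raw bytes and set it on the entity.
public class BlobExample {
    public static void main(String[] args) throws Exception {
        byte[] bytes = "asset-bytes".getBytes("UTF-8");
        Blob blob = new SerialBlob(bytes);
        StaticAssetStorageImpl storage = new StaticAssetStorageImpl();
        storage.setStaticAssetId(42L); // 42L is an illustrative id
        storage.setFileData(blob);
        System.out.println("stored " + blob.length() + " bytes");
    }
}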
537 |
public class ORemoteFetchContext implements OFetchContext {
public void onBeforeStandardField(Object iFieldValue, String iFieldName, Object iUserObject) {
}
public void onAfterStandardField(Object iFieldValue, String iFieldName, Object iUserObject) {
}
public void onBeforeMap(ORecordSchemaAware<?> iRootRecord, String iFieldName, final Object iUserObject) throws OFetchException {
}
public void onBeforeFetch(ORecordSchemaAware<?> iRootRecord) throws OFetchException {
}
public void onBeforeArray(ORecordSchemaAware<?> iRootRecord, String iFieldName, Object iUserObject, OIdentifiable[] iArray)
throws OFetchException {
}
public void onAfterArray(ORecordSchemaAware<?> iRootRecord, String iFieldName, Object iUserObject) throws OFetchException {
}
public void onBeforeDocument(ORecordSchemaAware<?> iRecord, final ORecordSchemaAware<?> iDocument, String iFieldName,
final Object iUserObject) throws OFetchException {
}
public void onBeforeCollection(ORecordSchemaAware<?> iRootRecord, String iFieldName, final Object iUserObject,
final Collection<?> iCollection) throws OFetchException {
}
public void onAfterMap(ORecordSchemaAware<?> iRootRecord, String iFieldName, final Object iUserObject) throws OFetchException {
}
public void onAfterFetch(ORecordSchemaAware<?> iRootRecord) throws OFetchException {
}
public void onAfterDocument(ORecordSchemaAware<?> iRootRecord, final ORecordSchemaAware<?> iDocument, String iFieldName,
final Object iUserObject) throws OFetchException {
}
public void onAfterCollection(ORecordSchemaAware<?> iRootRecord, String iFieldName, final Object iUserObject)
throws OFetchException {
}
public boolean fetchEmbeddedDocuments() {
return false;
}
}
| 0 (true) | core_src_main_java_com_orientechnologies_orient_core_fetch_remote_ORemoteFetchContext.java |
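ORemoteFetchContext is a no-op base: it implements every OFetchContext callback as empty so subclasses can override only the hooks they need. A minimal, self-contained analogue of that adapter pattern (illustrative names, not OrientDB API):
// Interface with many callbacks, an empty base, and a subclass that
// overrides only what it cares about.
public class FetchListeners {
    interface FetchListener {
        void onBeforeFetch(String record);
        void onAfterFetch(String record);
    }
    static class NoOpFetchListener implements FetchListener {
        public void onBeforeFetch(String record) { }
        public void onAfterFetch(String record) { }
    }
    static class LoggingFetchListener extends NoOpFetchListener {
        @Override public void onAfterFetch(String record) {
            System.out.println("fetched " + record);
        }
    }
    public static void main(String[] args) {
        FetchListener l = new LoggingFetchListener();
        l.onBeforeFetch("doc#1"); // inherited no-op
        l.onAfterFetch("doc#1");  // overridden, logs
    }
}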
119 |
static final class WorkQueue {
/**
* Capacity of work-stealing queue array upon initialization.
* Must be a power of two; at least 4, but should be larger to
* reduce or eliminate cacheline sharing among queues.
* Currently, it is much larger, as a partial workaround for
* the fact that JVMs often place arrays in locations that
* share GC bookkeeping (especially cardmarks) such that
* per-write accesses encounter serious memory contention.
*/
static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
/**
* Maximum size for queue arrays. Must be a power of two less
* than or equal to 1 << (31 - width of array entry) to ensure
* lack of wraparound of index calculations, but defined to a
* value a bit less than this to help users trap runaway
* programs before saturating systems.
*/
static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M
// Heuristic padding to ameliorate unfortunate memory placements
volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06;
volatile int eventCount; // encoded inactivation count; < 0 if inactive
int nextWait; // encoded record of next event waiter
int nsteals; // number of steals
int hint; // steal index hint
short poolIndex; // index of this queue in pool
final short mode; // 0: lifo, > 0: fifo, < 0: shared
volatile int qlock; // 1: locked, -1: terminate; else 0
volatile int base; // index of next slot for poll
int top; // index of next slot for push
ForkJoinTask<?>[] array; // the elements (initially unallocated)
final ForkJoinPool pool; // the containing pool (may be null)
final ForkJoinWorkerThread owner; // owning thread or null if shared
volatile Thread parker; // == owner during call to park; else null
volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin
ForkJoinTask<?> currentSteal; // current non-local task being executed
volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17;
volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d;
WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode,
int seed) {
this.pool = pool;
this.owner = owner;
this.mode = (short)mode;
this.hint = seed; // store initial seed for runWorker
// Place indices in the center of array (that is not yet allocated)
base = top = INITIAL_QUEUE_CAPACITY >>> 1;
}
/**
* Returns the approximate number of tasks in the queue.
*/
final int queueSize() {
int n = base - top; // non-owner callers must read base first
return (n >= 0) ? 0 : -n; // ignore transient negative
}
/**
* Provides a more accurate estimate of whether this queue has
* any tasks than does queueSize, by checking whether a
* near-empty queue has at least one unclaimed task.
*/
final boolean isEmpty() {
ForkJoinTask<?>[] a; int m, s;
int n = base - (s = top);
return (n >= 0 ||
(n == -1 &&
((a = array) == null ||
(m = a.length - 1) < 0 ||
U.getObject
(a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null)));
}
/**
* Pushes a task. Call only by owner in unshared queues. (The
* shared-queue version is embedded in method externalPush.)
*
* @param task the task. Caller must ensure non-null.
* @throws RejectedExecutionException if array cannot be resized
*/
final void push(ForkJoinTask<?> task) {
ForkJoinTask<?>[] a; ForkJoinPool p;
int s = top, n;
if ((a = array) != null) { // ignore if queue removed
int m = a.length - 1;
U.putOrderedObject(a, ((m & s) << ASHIFT) + ABASE, task);
if ((n = (top = s + 1) - base) <= 2)
(p = pool).signalWork(p.workQueues, this);
else if (n >= m)
growArray();
}
}
/**
* Initializes or doubles the capacity of array. Call either
* by owner or with lock held -- it is OK for base, but not
* top, to move while resizings are in progress.
*/
final ForkJoinTask<?>[] growArray() {
ForkJoinTask<?>[] oldA = array;
int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY;
if (size > MAXIMUM_QUEUE_CAPACITY)
throw new RejectedExecutionException("Queue capacity exceeded");
int oldMask, t, b;
ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size];
if (oldA != null && (oldMask = oldA.length - 1) >= 0 &&
(t = top) - (b = base) > 0) {
int mask = size - 1;
do {
ForkJoinTask<?> x;
int oldj = ((b & oldMask) << ASHIFT) + ABASE;
int j = ((b & mask) << ASHIFT) + ABASE;
x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj);
if (x != null &&
U.compareAndSwapObject(oldA, oldj, x, null))
U.putObjectVolatile(a, j, x);
} while (++b != t);
}
return a;
}
/**
* Takes next task, if one exists, in LIFO order. Call only
* by owner in unshared queues.
*/
final ForkJoinTask<?> pop() {
ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m;
if ((a = array) != null && (m = a.length - 1) >= 0) {
for (int s; (s = top - 1) - base >= 0;) {
long j = ((m & s) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null)
break;
if (U.compareAndSwapObject(a, j, t, null)) {
top = s;
return t;
}
}
}
return null;
}
/**
* Takes a task in FIFO order if b is base of queue and a task
* can be claimed without contention. Specialized versions
* appear in ForkJoinPool methods scan and tryHelpStealer.
*/
final ForkJoinTask<?> pollAt(int b) {
ForkJoinTask<?> t; ForkJoinTask<?>[] a;
if ((a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null &&
base == b && U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
return t;
}
}
return null;
}
/**
* Takes next task, if one exists, in FIFO order.
*/
final ForkJoinTask<?> poll() {
ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t;
while ((b = base) - top < 0 && (a = array) != null) {
int j = (((a.length - 1) & b) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObjectVolatile(a, j);
if (t != null) {
if (U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
return t;
}
}
else if (base == b) {
if (b + 1 == top)
break;
Thread.yield(); // wait for lagging update (very rare)
}
}
return null;
}
/**
* Takes next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> nextLocalTask() {
return mode == 0 ? pop() : poll();
}
/**
* Returns next task, if one exists, in order specified by mode.
*/
final ForkJoinTask<?> peek() {
ForkJoinTask<?>[] a = array; int m;
if (a == null || (m = a.length - 1) < 0)
return null;
int i = mode == 0 ? top - 1 : base;
int j = ((i & m) << ASHIFT) + ABASE;
return (ForkJoinTask<?>)U.getObjectVolatile(a, j);
}
/**
* Pops the given task only if it is at the current top.
* (A shared version is available only via FJP.tryExternalUnpush)
*/
final boolean tryUnpush(ForkJoinTask<?> t) {
ForkJoinTask<?>[] a; int s;
if ((a = array) != null && (s = top) != base &&
U.compareAndSwapObject
(a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
top = s;
return true;
}
return false;
}
/**
* Removes and cancels all known tasks, ignoring any exceptions.
*/
final void cancelAll() {
ForkJoinTask.cancelIgnoringExceptions(currentJoin);
ForkJoinTask.cancelIgnoringExceptions(currentSteal);
for (ForkJoinTask<?> t; (t = poll()) != null; )
ForkJoinTask.cancelIgnoringExceptions(t);
}
// Specialized execution methods
/**
* Polls and runs tasks until empty.
*/
final void pollAndExecAll() {
for (ForkJoinTask<?> t; (t = poll()) != null;)
t.doExec();
}
/**
* Executes a top-level task and any local tasks remaining
* after execution.
*/
final void runTask(ForkJoinTask<?> task) {
if ((currentSteal = task) != null) {
task.doExec();
ForkJoinTask<?>[] a = array;
int md = mode;
++nsteals;
currentSteal = null;
if (md != 0)
pollAndExecAll();
else if (a != null) {
int s, m = a.length - 1;
while ((s = top - 1) - base >= 0) {
long i = ((m & s) << ASHIFT) + ABASE;
ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObject(a, i);
if (t == null)
break;
if (U.compareAndSwapObject(a, i, t, null)) {
top = s;
t.doExec();
}
}
}
}
}
/**
* If present, removes from queue and executes the given task,
* or any other cancelled task. Returns (true) on any CAS
* or consistency check failure so caller can retry.
*
* @return false if no progress can be made, else true
*/
final boolean tryRemoveAndExec(ForkJoinTask<?> task) {
boolean stat;
ForkJoinTask<?>[] a; int m, s, b, n;
if (task != null && (a = array) != null && (m = a.length - 1) >= 0 &&
(n = (s = top) - (b = base)) > 0) {
boolean removed = false, empty = true;
stat = true;
for (ForkJoinTask<?> t;;) { // traverse from s to b
long j = ((--s & m) << ASHIFT) + ABASE;
t = (ForkJoinTask<?>)U.getObject(a, j);
if (t == null) // inconsistent length
break;
else if (t == task) {
if (s + 1 == top) { // pop
if (!U.compareAndSwapObject(a, j, task, null))
break;
top = s;
removed = true;
}
else if (base == b) // replace with proxy
removed = U.compareAndSwapObject(a, j, task,
new EmptyTask());
break;
}
else if (t.status >= 0)
empty = false;
else if (s + 1 == top) { // pop and throw away
if (U.compareAndSwapObject(a, j, t, null))
top = s;
break;
}
if (--n == 0) {
if (!empty && base == b)
stat = false;
break;
}
}
if (removed)
task.doExec();
}
else
stat = false;
return stat;
}
/**
* Tries to poll for and execute the given task or any other
* task in its CountedCompleter computation.
*/
final boolean pollAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int b; Object o; CountedCompleter<?> t, r;
if ((b = base) - top < 0 && (a = array) != null) {
long j = (((a.length - 1) & b) << ASHIFT) + ABASE;
if ((o = U.getObjectVolatile(a, j)) == null)
return true; // retry
if (o instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (base == b &&
U.compareAndSwapObject(a, j, t, null)) {
U.putOrderedInt(this, QBASE, b + 1);
t.doExec();
}
return true;
}
else if ((r = r.completer) == null)
break; // not part of root computation
}
}
}
return false;
}
/**
* Tries to pop and execute the given task or any other task
* in its CountedCompleter computation.
*/
final boolean externalPopAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
if (base - (s = top) < 0 && (a = array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (U.compareAndSwapInt(this, QLOCK, 0, 1)) {
if (top == s && array == a &&
U.compareAndSwapObject(a, j, t, null)) {
top = s - 1;
qlock = 0;
t.doExec();
}
else
qlock = 0;
}
return true;
}
else if ((r = r.completer) == null)
break;
}
}
}
return false;
}
/**
* Internal version
*/
final boolean internalPopAndExecCC(CountedCompleter<?> root) {
ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r;
if (base - (s = top) < 0 && (a = array) != null) {
long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE;
if ((o = U.getObject(a, j)) instanceof CountedCompleter) {
for (t = (CountedCompleter<?>)o, r = t;;) {
if (r == root) {
if (U.compareAndSwapObject(a, j, t, null)) {
top = s - 1;
t.doExec();
}
return true;
}
else if ((r = r.completer) == null)
break;
}
}
}
return false;
}
/**
* Returns true if owned and not known to be blocked.
*/
final boolean isApparentlyUnblocked() {
Thread wt; Thread.State s;
return (eventCount >= 0 &&
(wt = owner) != null &&
(s = wt.getState()) != Thread.State.BLOCKED &&
s != Thread.State.WAITING &&
s != Thread.State.TIMED_WAITING);
}
// Unsafe mechanics
private static final sun.misc.Unsafe U;
private static final long QBASE;
private static final long QLOCK;
private static final int ABASE;
private static final int ASHIFT;
static {
try {
U = getUnsafe();
Class<?> k = WorkQueue.class;
Class<?> ak = ForkJoinTask[].class;
QBASE = U.objectFieldOffset
(k.getDeclaredField("base"));
QLOCK = U.objectFieldOffset
(k.getDeclaredField("qlock"));
ABASE = U.arrayBaseOffset(ak);
int scale = U.arrayIndexScale(ak);
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
}
}
| 0 (true) | src_main_java_jsr166e_ForkJoinPool.java |
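WorkQueue addresses slots with power-of-two masking: (index & (length - 1)) wraps base and top around the array, and ASHIFT/ABASE then convert that slot index to a raw byte offset for Unsafe. The same circular indexing in plain array form, without Unsafe (a sketch, not the pool's actual fast path):
// Capacity is a power of two, so (index & mask) replaces a modulo and the
// base/top counters never need to be reset.
public class RingIndexingSketch {
    public static void main(String[] args) {
        int capacity = 8;                       // must be a power of two
        int mask = capacity - 1;
        String[] slots = new String[capacity];
        int base = capacity >>> 1, top = base;  // start indices in the middle
        for (int i = 0; i < 5; i++)
            slots[(top++) & mask] = "task" + i; // push
        while (base != top)                     // poll in FIFO order
            System.out.println(slots[(base++) & mask]);
    }
}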
94 |
@SuppressWarnings("serial")
static final class ReduceValuesTask<K,V>
extends BulkTask<K,V,V> {
final BiFun<? super V, ? super V, ? extends V> reducer;
V result;
ReduceValuesTask<K,V> rights, nextRight;
ReduceValuesTask
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
ReduceValuesTask<K,V> nextRight,
BiFun<? super V, ? super V, ? extends V> reducer) {
super(p, b, i, f, t); this.nextRight = nextRight;
this.reducer = reducer;
}
public final V getRawResult() { return result; }
public final void compute() {
final BiFun<? super V, ? super V, ? extends V> reducer;
if ((reducer = this.reducer) != null) {
for (int i = baseIndex, f, h; batch > 0 &&
(h = ((f = baseLimit) + i) >>> 1) > i;) {
addToPendingCount(1);
(rights = new ReduceValuesTask<K,V>
(this, batch >>>= 1, baseLimit = h, f, tab,
rights, reducer)).fork();
}
V r = null;
for (Node<K,V> p; (p = advance()) != null; ) {
V v = p.val;
r = (r == null) ? v : reducer.apply(r, v);
}
result = r;
CountedCompleter<?> c;
for (c = firstComplete(); c != null; c = c.nextComplete()) {
@SuppressWarnings("unchecked") ReduceValuesTask<K,V>
t = (ReduceValuesTask<K,V>)c,
s = t.rights;
while (s != null) {
V tr, sr;
if ((sr = s.result) != null)
t.result = (((tr = t.result) == null) ? sr :
reducer.apply(tr, sr));
s = t.rights = s.nextRight;
}
}
}
}
}
| 0 (true) | src_main_java_jsr166e_ConcurrentHashMapV8.java |
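ReduceValuesTask repeatedly halves its range, forks the right half, reduces the left half itself, and merges sibling results through the CountedCompleter chain. The same divide-and-reduce shape in a self-contained RecursiveTask, using the standard java.util.concurrent classes rather than the jsr166e backport:
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveTask;
// Split until the range is small, fork the right half, compute the left
// half in the current thread, then combine the two partial results.
public class SumTask extends RecursiveTask<Long> {
    final long[] a; final int lo, hi;
    SumTask(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }
    protected Long compute() {
        if (hi - lo <= 1024) {                  // sequential threshold
            long s = 0;
            for (int i = lo; i < hi; i++) s += a[i];
            return s;
        }
        int mid = (lo + hi) >>> 1;
        SumTask right = new SumTask(a, mid, hi);
        right.fork();                           // run right half asynchronously
        long left = new SumTask(a, lo, mid).compute();
        return left + right.join();             // merge, like sibling results
    }
    public static void main(String[] args) {
        long[] a = new long[100000];
        for (int i = 0; i < a.length; i++) a[i] = i;
        System.out.println(new ForkJoinPool().invoke(new SumTask(a, 0, a.length)));
    }
}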
28 |
static final class ThenAccept<T> extends Completion {
final CompletableFuture<? extends T> src;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
final Executor executor;
ThenAccept(CompletableFuture<? extends T> src,
Action<? super T> fn,
CompletableFuture<Void> dst,
Executor executor) {
this.src = src; this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final Action<? super T> fn;
final CompletableFuture<Void> dst;
Object r; T t; Throwable ex;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
Executor e = executor;
if (ex == null) {
try {
if (e != null)
e.execute(new AsyncAccept<T>(t, fn, dst));
else
fn.accept(t);
} catch (Throwable rex) {
ex = rex;
}
}
if (e == null || ex != null)
dst.internalComplete(null, ex);
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0 (true) | src_main_java_jsr166e_CompletableFuture.java |
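ThenAccept is the plumbing behind the public thenAccept/thenAcceptAsync API: it consumes the source future's result (or propagates its exception) into a dependent Void future, optionally dispatching through an executor. The equivalent behavior through the standard java.util.concurrent.CompletableFuture API:
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
public class ThenAcceptDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        CompletableFuture<String> src =
                CompletableFuture.supplyAsync(() -> "payload", pool);
        // Dependent stage: runs the action when src completes; the returned
        // future completes (or fails) after the action does.
        CompletableFuture<Void> dst =
                src.thenAcceptAsync(s -> System.out.println("got " + s), pool);
        dst.join(); // propagates any exception from src or the action
        pool.shutdown();
    }
}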
1,391 |
Collection<IndexWarmersMetaData.Entry> filteredWarmers = Collections2.filter(indexWarmersMetaData.entries(), new Predicate<IndexWarmersMetaData.Entry>() {
@Override
public boolean apply(IndexWarmersMetaData.Entry warmer) {
if (warmers.length != 0 && types.length != 0) {
return Regex.simpleMatch(warmers, warmer.name()) && Regex.simpleMatch(types, warmer.types());
} else if (warmers.length != 0) {
return Regex.simpleMatch(warmers, warmer.name());
} else if (types.length != 0) {
return Regex.simpleMatch(types, warmer.types());
} else {
return true;
}
}
});
| 0 (true) | src_main_java_org_elasticsearch_cluster_metadata_MetaData.java |
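The Guava predicate above keeps a warmer when both the warmer-name and type patterns match, treating an empty pattern list as "match everything". The same selection logic with java.util streams; simpleMatch here is a plain wildcard stand-in, not Elasticsearch's Regex.simpleMatch, and the data is illustrative:
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
// An empty pattern array matches all values, mirroring the predicate's
// branch structure (both filters, one filter, or neither).
public class WarmerFilterSketch {
    static boolean simpleMatch(String[] patterns, String value) {
        return Arrays.stream(patterns)
                .anyMatch(p -> value.matches(p.replace("*", ".*")));
    }
    static boolean matches(String[] patterns, String value) {
        return patterns.length == 0 || simpleMatch(patterns, value);
    }
    public static void main(String[] args) {
        String[] warmers = { "warm_*" }, types = {};
        List<String> names = Arrays.asList("warm_top", "cold_bottom");
        List<String> kept = names.stream()
                .filter(n -> matches(warmers, n) && matches(types, n))
                .collect(Collectors.toList());
        System.out.println(kept); // [warm_top]
    }
}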
3,122 |
public class InternalEngine extends AbstractIndexShardComponent implements Engine {
private volatile ByteSizeValue indexingBufferSize;
private volatile int indexConcurrency;
private volatile boolean compoundOnFlush = true;
private long gcDeletesInMillis;
private volatile boolean enableGcDeletes = true;
private volatile String codecName;
private final ThreadPool threadPool;
private final ShardIndexingService indexingService;
private final IndexSettingsService indexSettingsService;
@Nullable
private final InternalIndicesWarmer warmer;
private final Store store;
private final SnapshotDeletionPolicy deletionPolicy;
private final Translog translog;
private final MergePolicyProvider mergePolicyProvider;
private final MergeSchedulerProvider mergeScheduler;
private final AnalysisService analysisService;
private final SimilarityService similarityService;
private final CodecService codecService;
private final ReadWriteLock rwl = new ReentrantReadWriteLock();
private volatile IndexWriter indexWriter;
private final SearcherFactory searcherFactory = new SearchFactory();
private volatile SearcherManager searcherManager;
private volatile boolean closed = false;
// flag indicating if a dirty operation has occurred since the last refresh
private volatile boolean dirty = false;
private volatile boolean possibleMergeNeeded = false;
private final AtomicBoolean optimizeMutex = new AtomicBoolean();
// we use flushNeeded here, since if there are no changes the commit will not
// actually happen, and then the commitUserData and the new translog will not be reflected
private volatile boolean flushNeeded = false;
private final AtomicInteger flushing = new AtomicInteger();
private final Lock flushLock = new ReentrantLock();
private final RecoveryCounter onGoingRecoveries = new RecoveryCounter();
// A uid (in the form of BytesRef) to the version map
// we use the hashed variant since we iterate over it and check removal and additions on existing keys
private final ConcurrentMap<HashedBytesRef, VersionValue> versionMap;
private final Object[] dirtyLocks;
private final Object refreshMutex = new Object();
private final ApplySettings applySettings = new ApplySettings();
private volatile boolean failOnMergeFailure;
private Throwable failedEngine = null;
private final Object failedEngineMutex = new Object();
private final CopyOnWriteArrayList<FailedEngineListener> failedEngineListeners = new CopyOnWriteArrayList<FailedEngineListener>();
private final AtomicLong translogIdGenerator = new AtomicLong();
private SegmentInfos lastCommittedSegmentInfos;
@Inject
public InternalEngine(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool,
IndexSettingsService indexSettingsService, ShardIndexingService indexingService, @Nullable IndicesWarmer warmer,
Store store, SnapshotDeletionPolicy deletionPolicy, Translog translog,
MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler,
AnalysisService analysisService, SimilarityService similarityService, CodecService codecService) throws EngineException {
super(shardId, indexSettings);
Preconditions.checkNotNull(store, "Store must be provided to the engine");
Preconditions.checkNotNull(deletionPolicy, "Snapshot deletion policy must be provided to the engine");
Preconditions.checkNotNull(translog, "Translog must be provided to the engine");
this.gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES, TimeValue.timeValueSeconds(60)).millis();
this.indexingBufferSize = componentSettings.getAsBytesSize("index_buffer_size", new ByteSizeValue(64, ByteSizeUnit.MB)); // not really important, as it is set by the IndexingMemory manager
this.codecName = indexSettings.get(INDEX_CODEC, "default");
this.threadPool = threadPool;
this.indexSettingsService = indexSettingsService;
this.indexingService = indexingService;
this.warmer = (InternalIndicesWarmer) warmer;
this.store = store;
this.deletionPolicy = deletionPolicy;
this.translog = translog;
this.mergePolicyProvider = mergePolicyProvider;
this.mergeScheduler = mergeScheduler;
this.analysisService = analysisService;
this.similarityService = similarityService;
this.codecService = codecService;
this.compoundOnFlush = indexSettings.getAsBoolean(INDEX_COMPOUND_ON_FLUSH, this.compoundOnFlush);
this.indexConcurrency = indexSettings.getAsInt(INDEX_INDEX_CONCURRENCY, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65)));
this.versionMap = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
this.dirtyLocks = new Object[indexConcurrency * 50]; // we multiply it to have enough...
for (int i = 0; i < dirtyLocks.length; i++) {
dirtyLocks[i] = new Object();
}
this.indexSettingsService.addListener(applySettings);
this.failOnMergeFailure = indexSettings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE, true);
if (failOnMergeFailure) {
this.mergeScheduler.addFailureListener(new FailEngineOnMergeFailure());
}
}
@Override
public void updateIndexingBufferSize(ByteSizeValue indexingBufferSize) {
ByteSizeValue preValue = this.indexingBufferSize;
rwl.readLock().lock();
try {
this.indexingBufferSize = indexingBufferSize;
IndexWriter indexWriter = this.indexWriter;
if (indexWriter != null) {
indexWriter.getConfig().setRAMBufferSizeMB(this.indexingBufferSize.mbFrac());
}
} finally {
rwl.readLock().unlock();
}
if (preValue.bytes() != indexingBufferSize.bytes()) {
// its inactive, make sure we do a full flush in this case, since the memory
// changes only after a "data" change has happened to the writer
if (indexingBufferSize == Engine.INACTIVE_SHARD_INDEXING_BUFFER && preValue != Engine.INACTIVE_SHARD_INDEXING_BUFFER) {
logger.debug("updating index_buffer_size from [{}] to (inactive) [{}]", preValue, indexingBufferSize);
try {
flush(new Flush().type(Flush.Type.NEW_WRITER));
} catch (EngineClosedException e) {
// ignore
} catch (FlushNotAllowedEngineException e) {
// ignore
} catch (Throwable e) {
logger.warn("failed to flush after setting shard to inactive", e);
}
} else {
logger.debug("updating index_buffer_size from [{}] to [{}]", preValue, indexingBufferSize);
}
}
}
@Override
public void addFailedEngineListener(FailedEngineListener listener) {
failedEngineListeners.add(listener);
}
@Override
public void start() throws EngineException {
rwl.writeLock().lock();
try {
if (indexWriter != null) {
throw new EngineAlreadyStartedException(shardId);
}
if (closed) {
throw new EngineClosedException(shardId);
}
if (logger.isDebugEnabled()) {
logger.debug("starting engine");
}
try {
this.indexWriter = createWriter();
} catch (IOException e) {
throw new EngineCreationFailureException(shardId, "failed to create engine", e);
}
try {
// commit on a just opened writer will commit even if there are no changes done to it
// we rely on that for the commit data translog id key
if (Lucene.indexExists(store.directory())) {
Map<String, String> commitUserData = Lucene.readSegmentInfos(store.directory()).getUserData();
if (commitUserData.containsKey(Translog.TRANSLOG_ID_KEY)) {
translogIdGenerator.set(Long.parseLong(commitUserData.get(Translog.TRANSLOG_ID_KEY)));
} else {
translogIdGenerator.set(System.currentTimeMillis());
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogIdGenerator.get())).map());
indexWriter.commit();
}
} else {
translogIdGenerator.set(System.currentTimeMillis());
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogIdGenerator.get())).map());
indexWriter.commit();
}
translog.newTranslog(translogIdGenerator.get());
this.searcherManager = buildSearchManager(indexWriter);
readLastCommittedSegmentsInfo();
} catch (IOException e) {
try {
indexWriter.rollback();
} catch (IOException e1) {
// ignore
} finally {
IOUtils.closeWhileHandlingException(indexWriter);
}
throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e);
}
} finally {
rwl.writeLock().unlock();
}
}
private void readLastCommittedSegmentsInfo() throws IOException {
SegmentInfos infos = new SegmentInfos();
infos.read(store.directory());
lastCommittedSegmentInfos = infos;
}
@Override
public TimeValue defaultRefreshInterval() {
return new TimeValue(1, TimeUnit.SECONDS);
}
@Override
public void enableGcDeletes(boolean enableGcDeletes) {
this.enableGcDeletes = enableGcDeletes;
}
public GetResult get(Get get) throws EngineException {
rwl.readLock().lock();
try {
if (get.realtime()) {
VersionValue versionValue = versionMap.get(versionKey(get.uid()));
if (versionValue != null) {
if (versionValue.delete()) {
return GetResult.NOT_EXISTS;
}
if (get.version() != Versions.MATCH_ANY) {
if (get.versionType().isVersionConflict(versionValue.version(), get.version())) {
Uid uid = Uid.createUid(get.uid().text());
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), versionValue.version(), get.version());
}
}
if (!get.loadSource()) {
return new GetResult(true, versionValue.version(), null);
}
byte[] data = translog.read(versionValue.translogLocation());
if (data != null) {
try {
Translog.Source source = TranslogStreams.readSource(data);
return new GetResult(true, versionValue.version(), source);
} catch (IOException e) {
// switched on us, read it from the reader
}
}
}
}
// no version, get the version from the index, we know that we refresh on flush
Searcher searcher = acquireSearcher("get");
final Versions.DocIdAndVersion docIdAndVersion;
try {
docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
} catch (Throwable e) {
searcher.release();
//TODO: A better exception goes here
throw new EngineException(shardId(), "Couldn't resolve version", e);
}
if (get.version() != Versions.MATCH_ANY && docIdAndVersion != null) {
if (get.versionType().isVersionConflict(docIdAndVersion.version, get.version())) {
searcher.release();
Uid uid = Uid.createUid(get.uid().text());
throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
}
}
if (docIdAndVersion != null) {
// don't release the searcher on this path, it is the responsibility of the caller to call GetResult.release
return new GetResult(searcher, docIdAndVersion);
} else {
searcher.release();
return GetResult.NOT_EXISTS;
}
} finally {
rwl.readLock().unlock();
}
}
@Override
public void create(Create create) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId, failedEngine);
}
innerCreate(create, writer);
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new CreateFailedEngineException(shardId, create, e);
} catch (OutOfMemoryError e) {
failEngine(e);
throw new CreateFailedEngineException(shardId, create, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new CreateFailedEngineException(shardId, create, e);
} finally {
rwl.readLock().unlock();
}
}
private void innerCreate(Create create, IndexWriter writer) throws IOException {
synchronized (dirtyLock(create.uid())) {
HashedBytesRef versionKey = versionKey(create.uid());
final long currentVersion;
VersionValue versionValue = versionMap.get(versionKey);
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(create.uid());
} else {
if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
// same logic as index
long updatedVersion;
long expectedVersion = create.version();
if (create.origin() == Operation.Origin.PRIMARY) {
if (create.versionType().isVersionConflict(currentVersion, expectedVersion)) {
throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
}
updatedVersion = create.versionType().updateVersion(currentVersion, expectedVersion);
} else { // if (index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY) {
// replicas treat the version as "external" as it comes from the primary ->
// only exploding if the version they got is lower or equal to what they know.
if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
if (create.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new VersionConflictEngineException(shardId, create.type(), create.id(), currentVersion, expectedVersion);
}
}
updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
}
// if the doc does not exist, or it exists but is not deleted
if (versionValue != null) {
if (!versionValue.delete()) {
if (create.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
}
}
} else if (currentVersion != Versions.NOT_FOUND) {
// its not deleted, its already there
if (create.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new DocumentAlreadyExistsException(shardId, create.type(), create.id());
}
}
create.version(updatedVersion);
if (create.docs().size() > 1) {
writer.addDocuments(create.docs(), create.analyzer());
} else {
writer.addDocument(create.docs().get(0), create.analyzer());
}
Translog.Location translogLocation = translog.add(new Translog.Create(create));
versionMap.put(versionKey, new VersionValue(updatedVersion, false, threadPool.estimatedTimeInMillis(), translogLocation));
indexingService.postCreateUnderLock(create);
}
}
@Override
public void index(Index index) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId, failedEngine);
}
innerIndex(index, writer);
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new IndexFailedEngineException(shardId, index, e);
} catch (OutOfMemoryError e) {
failEngine(e);
throw new IndexFailedEngineException(shardId, index, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new IndexFailedEngineException(shardId, index, e);
} finally {
rwl.readLock().unlock();
}
}
private void innerIndex(Index index, IndexWriter writer) throws IOException {
synchronized (dirtyLock(index.uid())) {
HashedBytesRef versionKey = versionKey(index.uid());
final long currentVersion;
VersionValue versionValue = versionMap.get(versionKey);
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(index.uid());
} else {
if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
long updatedVersion;
long expectedVersion = index.version();
if (index.origin() == Operation.Origin.PRIMARY) {
if (index.versionType().isVersionConflict(currentVersion, expectedVersion)) {
throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
}
updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
} else { // if (index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY) {
// replicas treat the version as "external" as it comes from the primary ->
// only exploding if the version they got is lower or equal to what they know.
if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
if (index.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
}
}
updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
}
index.version(updatedVersion);
if (currentVersion == Versions.NOT_FOUND) {
// document does not exist, we can optimize for create
index.created(true);
if (index.docs().size() > 1) {
writer.addDocuments(index.docs(), index.analyzer());
} else {
writer.addDocument(index.docs().get(0), index.analyzer());
}
} else {
if (versionValue != null) {
index.created(versionValue.delete()); // we have a delete which is not GC'ed...
}
if (index.docs().size() > 1) {
writer.updateDocuments(index.uid(), index.docs(), index.analyzer());
} else {
writer.updateDocument(index.uid(), index.docs().get(0), index.analyzer());
}
}
Translog.Location translogLocation = translog.add(new Translog.Index(index));
versionMap.put(versionKey, new VersionValue(updatedVersion, false, threadPool.estimatedTimeInMillis(), translogLocation));
indexingService.postIndexUnderLock(index);
}
}
@Override
public void delete(Delete delete) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId, failedEngine);
}
innerDelete(delete, writer);
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new DeleteFailedEngineException(shardId, delete, e);
} catch (OutOfMemoryError e) {
failEngine(e);
throw new DeleteFailedEngineException(shardId, delete, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new DeleteFailedEngineException(shardId, delete, e);
} finally {
rwl.readLock().unlock();
}
}
private void innerDelete(Delete delete, IndexWriter writer) throws IOException {
synchronized (dirtyLock(delete.uid())) {
final long currentVersion;
HashedBytesRef versionKey = versionKey(delete.uid());
VersionValue versionValue = versionMap.get(versionKey);
if (versionValue == null) {
currentVersion = loadCurrentVersionFromIndex(delete.uid());
} else {
if (enableGcDeletes && versionValue.delete() && (threadPool.estimatedTimeInMillis() - versionValue.time()) > gcDeletesInMillis) {
currentVersion = Versions.NOT_FOUND; // deleted, and GC
} else {
currentVersion = versionValue.version();
}
}
long updatedVersion;
long expectedVersion = delete.version();
if (delete.origin() == Operation.Origin.PRIMARY) {
if (delete.versionType().isVersionConflict(currentVersion, expectedVersion)) {
throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion, expectedVersion);
}
updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
} else { // if (index.origin() == Operation.Origin.REPLICA || index.origin() == Operation.Origin.RECOVERY) {
// replicas treat the version as "external" as it comes from the primary ->
// only exploding if the version they got is lower or equal to what they know.
if (VersionType.EXTERNAL.isVersionConflict(currentVersion, expectedVersion)) {
if (delete.origin() == Operation.Origin.RECOVERY) {
return;
} else {
throw new VersionConflictEngineException(shardId, delete.type(), delete.id(), currentVersion - 1, expectedVersion);
}
}
updatedVersion = VersionType.EXTERNAL.updateVersion(currentVersion, expectedVersion);
}
if (currentVersion == Versions.NOT_FOUND) {
// doc does not exist and no prior deletes
delete.version(updatedVersion).found(false);
Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
} else if (versionValue != null && versionValue.delete()) {
// a "delete on delete", in this case, we still increment the version, log it, and return that version
delete.version(updatedVersion).found(false);
Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
} else {
delete.version(updatedVersion).found(true);
writer.deleteDocuments(delete.uid());
Translog.Location translogLocation = translog.add(new Translog.Delete(delete));
versionMap.put(versionKey, new VersionValue(updatedVersion, true, threadPool.estimatedTimeInMillis(), translogLocation));
}
indexingService.postDeleteUnderLock(delete);
}
}
@Override
public void delete(DeleteByQuery delete) throws EngineException {
rwl.readLock().lock();
try {
IndexWriter writer = this.indexWriter;
if (writer == null) {
throw new EngineClosedException(shardId);
}
Query query;
if (delete.nested() && delete.aliasFilter() != null) {
query = new IncludeNestedDocsQuery(new XFilteredQuery(delete.query(), delete.aliasFilter()), delete.parentFilter());
} else if (delete.nested()) {
query = new IncludeNestedDocsQuery(delete.query(), delete.parentFilter());
} else if (delete.aliasFilter() != null) {
query = new XFilteredQuery(delete.query(), delete.aliasFilter());
} else {
query = delete.query();
}
writer.deleteDocuments(query);
translog.add(new Translog.DeleteByQuery(delete));
dirty = true;
possibleMergeNeeded = true;
flushNeeded = true;
} catch (IOException e) {
throw new DeleteByQueryFailedEngineException(shardId, delete, e);
} finally {
rwl.readLock().unlock();
}
//TODO: This is heavy, since we refresh, but we really have to...
refreshVersioningTable(System.currentTimeMillis());
}
@Override
public final Searcher acquireSearcher(String source) throws EngineException {
SearcherManager manager = this.searcherManager;
if (manager == null) {
throw new EngineClosedException(shardId);
}
try {
IndexSearcher searcher = manager.acquire();
return newSearcher(source, searcher, manager);
} catch (Throwable ex) {
logger.error("failed to acquire searcher, source {}", ex, source);
throw new EngineException(shardId, ex.getMessage());
}
}
protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) {
return new EngineSearcher(source, searcher, manager);
}
@Override
public boolean refreshNeeded() {
return dirty;
}
@Override
public boolean possibleMergeNeeded() {
return this.possibleMergeNeeded;
}
@Override
public void refresh(Refresh refresh) throws EngineException {
if (indexWriter == null) {
throw new EngineClosedException(shardId);
}
// we obtain a read lock here, since we don't want a flush to happen while we are refreshing
// since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
rwl.readLock().lock();
try {
// this engine always acts as if waitForOperations=true
IndexWriter currentWriter = indexWriter;
if (currentWriter == null) {
throw new EngineClosedException(shardId, failedEngine);
}
try {
// maybeRefresh will only allow one refresh to execute, and the rest will "pass through",
// but we want to make sure not to lose any refresh calls, if one is taking time
synchronized (refreshMutex) {
if (dirty || refresh.force()) {
dirty = false;
searcherManager.maybeRefresh();
}
}
} catch (AlreadyClosedException e) {
// an index writer got replaced on us, ignore
} catch (OutOfMemoryError e) {
failEngine(e);
throw new RefreshFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new RefreshFailedEngineException(shardId, e);
} catch (Throwable e) {
if (indexWriter == null) {
throw new EngineClosedException(shardId, failedEngine);
} else if (currentWriter != indexWriter) {
// an index writer got replaced on us, ignore
} else {
throw new RefreshFailedEngineException(shardId, e);
}
}
} finally {
rwl.readLock().unlock();
}
}
@Override
public void flush(Flush flush) throws EngineException {
ensureOpen();
if (flush.type() == Flush.Type.NEW_WRITER || flush.type() == Flush.Type.COMMIT_TRANSLOG) {
// check outside the lock as well so we can check without blocking on the write lock
if (onGoingRecoveries.get() > 0) {
throw new FlushNotAllowedEngineException(shardId, "recovery is in progress, flush [" + flush.type() + "] is not allowed");
}
}
int currentFlushing = flushing.incrementAndGet();
if (currentFlushing > 1 && !flush.waitIfOngoing()) {
flushing.decrementAndGet();
throw new FlushNotAllowedEngineException(shardId, "already flushing...");
}
flushLock.lock();
try {
if (flush.type() == Flush.Type.NEW_WRITER) {
rwl.writeLock().lock();
try {
ensureOpen();
if (onGoingRecoveries.get() > 0) {
throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
}
// disable refreshing, not dirty
dirty = false;
try {
// that's ok if the index writer failed and is in an inconsistent state
// we will get an exception on a dirty operation, and will cause the shard
// to be allocated to a different node
indexWriter.close(false);
indexWriter = createWriter();
// commit on a just opened writer will commit even if there are no changes done to it
// we rely on that for the commit data translog id key
if (flushNeeded || flush.force()) {
flushNeeded = false;
long translogId = translogIdGenerator.incrementAndGet();
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
indexWriter.commit();
translog.newTranslog(translogId);
}
SearcherManager current = this.searcherManager;
this.searcherManager = buildSearchManager(indexWriter);
try {
IOUtils.close(current);
} catch (Throwable t) {
logger.warn("Failed to close current SearcherManager", t);
}
refreshVersioningTable(threadPool.estimatedTimeInMillis());
} catch (OutOfMemoryError e) {
failEngine(e);
throw new FlushFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new FlushFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new FlushFailedEngineException(shardId, e);
}
} finally {
rwl.writeLock().unlock();
}
} else if (flush.type() == Flush.Type.COMMIT_TRANSLOG) {
rwl.readLock().lock();
try {
ensureOpen();
if (onGoingRecoveries.get() > 0) {
throw new FlushNotAllowedEngineException(shardId, "Recovery is in progress, flush is not allowed");
}
if (flushNeeded || flush.force()) {
flushNeeded = false;
try {
long translogId = translogIdGenerator.incrementAndGet();
translog.newTransientTranslog(translogId);
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
indexWriter.commit();
refreshVersioningTable(threadPool.estimatedTimeInMillis());
// we need to move transient to current only after we refresh
// so items added to current will still be around for realtime get
// when the transient translog overrides it
translog.makeTransientCurrent();
} catch (OutOfMemoryError e) {
translog.revertTransient();
failEngine(e);
throw new FlushFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new FlushFailedEngineException(shardId, e);
} catch (Throwable e) {
translog.revertTransient();
throw new FlushFailedEngineException(shardId, e);
}
}
} finally {
rwl.readLock().unlock();
}
} else if (flush.type() == Flush.Type.COMMIT) {
// note, its ok to just commit without cleaning the translog, its perfectly fine to replay a
// translog on an index that was opened on a committed point in time that is "in the future"
// of that translog
rwl.readLock().lock();
try {
ensureOpen();
// we allow to *just* commit if there is an ongoing recovery happening...
// its ok to use this; only a flush will cause a new translogId, and we are protected here from
// other flushes, since all flushes use flushLock
try {
long translogId = translog.currentId();
indexWriter.setCommitData(MapBuilder.<String, String>newMapBuilder().put(Translog.TRANSLOG_ID_KEY, Long.toString(translogId)).map());
indexWriter.commit();
} catch (OutOfMemoryError e) {
translog.revertTransient();
failEngine(e);
throw new FlushFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new FlushFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new FlushFailedEngineException(shardId, e);
}
} finally {
rwl.readLock().unlock();
}
} else {
throw new ElasticsearchIllegalStateException("flush type [" + flush.type() + "] not supported");
}
// reread the last committed segment infos
rwl.readLock().lock();
try {
ensureOpen();
readLastCommittedSegmentsInfo();
} catch (Throwable e) {
if (!closed) {
logger.warn("failed to read latest segment infos on flush", e);
}
} finally {
rwl.readLock().unlock();
}
} finally {
flushLock.unlock();
flushing.decrementAndGet();
}
}
private void ensureOpen() {
if (indexWriter == null) {
throw new EngineClosedException(shardId, failedEngine);
}
}
private void refreshVersioningTable(long time) {
// we need to refresh in order to clear older version values
refresh(new Refresh("version_table").force(true));
for (Map.Entry<HashedBytesRef, VersionValue> entry : versionMap.entrySet()) {
HashedBytesRef uid = entry.getKey();
synchronized (dirtyLock(uid.bytes)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
VersionValue versionValue = versionMap.get(uid);
if (versionValue == null) {
continue;
}
if (time - versionValue.time() <= 0) {
continue; // it's a newer value, added during/after the refresh; don't clear it
}
if (versionValue.delete()) {
if (enableGcDeletes && (time - versionValue.time()) > gcDeletesInMillis) {
versionMap.remove(uid);
}
} else {
versionMap.remove(uid);
}
}
}
}
@Override
public void maybeMerge() throws EngineException {
if (!possibleMergeNeeded) {
return;
}
possibleMergeNeeded = false;
rwl.readLock().lock();
try {
ensureOpen();
indexWriter.maybeMerge();
} catch (OutOfMemoryError e) {
failEngine(e);
throw new OptimizeFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage() != null && e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new OptimizeFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new OptimizeFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
}
}
@Override
public void optimize(Optimize optimize) throws EngineException {
if (optimize.flush()) {
flush(new Flush().force(true).waitIfOngoing(true));
}
if (optimizeMutex.compareAndSet(false, true)) {
rwl.readLock().lock();
try {
ensureOpen();
if (optimize.onlyExpungeDeletes()) {
indexWriter.forceMergeDeletes(false);
} else if (optimize.maxNumSegments() <= 0) {
indexWriter.maybeMerge();
possibleMergeNeeded = false;
} else {
indexWriter.forceMerge(optimize.maxNumSegments(), false);
}
} catch (OutOfMemoryError e) {
failEngine(e);
throw new OptimizeFailedEngineException(shardId, e);
} catch (IllegalStateException e) {
if (e.getMessage() != null && e.getMessage().contains("OutOfMemoryError")) {
failEngine(e);
}
throw new OptimizeFailedEngineException(shardId, e);
} catch (Throwable e) {
throw new OptimizeFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
optimizeMutex.set(false);
}
}
// wait for the merges outside of the read lock
if (optimize.waitForMerge()) {
indexWriter.waitForMerges();
}
if (optimize.flush()) {
flush(new Flush().force(true).waitIfOngoing(true));
}
}
@Override
public <T> T snapshot(SnapshotHandler<T> snapshotHandler) throws EngineException {
SnapshotIndexCommit snapshotIndexCommit = null;
Translog.Snapshot translogSnapshot = null;
rwl.readLock().lock();
try {
snapshotIndexCommit = deletionPolicy.snapshot();
translogSnapshot = translog.snapshot();
} catch (Throwable e) {
if (snapshotIndexCommit != null) {
snapshotIndexCommit.release();
}
throw new SnapshotFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
}
try {
return snapshotHandler.snapshot(snapshotIndexCommit, translogSnapshot);
} finally {
snapshotIndexCommit.release();
translogSnapshot.release();
}
}
@Override
public SnapshotIndexCommit snapshotIndex() throws EngineException {
rwl.readLock().lock();
try {
flush(new Flush().type(Flush.Type.COMMIT).waitIfOngoing(true));
ensureOpen();
return deletionPolicy.snapshot();
} catch (IOException e) {
throw new SnapshotFailedEngineException(shardId, e);
} finally {
rwl.readLock().unlock();
}
}
@Override
public void recover(RecoveryHandler recoveryHandler) throws EngineException {
// take a write lock here so it won't happen while a flush is in progress
// this means that next commits will not be allowed once the lock is released
rwl.writeLock().lock();
try {
if (closed) {
throw new EngineClosedException(shardId);
}
onGoingRecoveries.increment();
} finally {
rwl.writeLock().unlock();
}
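// recovery proceeds in three phases: (1) copy the snapshotted Lucene files, (2) replay the
// translog accumulated meanwhile, (3) replay the remaining translog under the write lock so
// no new operations slip in between phase 3 and the recovery finishing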
SnapshotIndexCommit phase1Snapshot;
try {
phase1Snapshot = deletionPolicy.snapshot();
} catch (Throwable e) {
onGoingRecoveries.decrement();
throw new RecoveryEngineException(shardId, 1, "Snapshot failed", e);
}
try {
recoveryHandler.phase1(phase1Snapshot);
} catch (Throwable e) {
onGoingRecoveries.decrement();
phase1Snapshot.release();
if (closed) {
e = new EngineClosedException(shardId, e);
}
throw new RecoveryEngineException(shardId, 1, "Execution failed", e);
}
Translog.Snapshot phase2Snapshot;
try {
phase2Snapshot = translog.snapshot();
} catch (Throwable e) {
onGoingRecoveries.decrement();
phase1Snapshot.release();
if (closed) {
e = new EngineClosedException(shardId, e);
}
throw new RecoveryEngineException(shardId, 2, "Snapshot failed", e);
}
try {
recoveryHandler.phase2(phase2Snapshot);
} catch (Throwable e) {
onGoingRecoveries.decrement();
phase1Snapshot.release();
phase2Snapshot.release();
if (closed) {
e = new EngineClosedException(shardId, e);
}
throw new RecoveryEngineException(shardId, 2, "Execution failed", e);
}
rwl.writeLock().lock();
Translog.Snapshot phase3Snapshot = null;
try {
phase3Snapshot = translog.snapshot(phase2Snapshot);
recoveryHandler.phase3(phase3Snapshot);
} catch (Throwable e) {
throw new RecoveryEngineException(shardId, 3, "Execution failed", e);
} finally {
onGoingRecoveries.decrement();
rwl.writeLock().unlock();
phase1Snapshot.release();
phase2Snapshot.release();
if (phase3Snapshot != null) {
phase3Snapshot.release();
}
}
}
private long getReaderRamBytesUsed(AtomicReaderContext reader) {
return SegmentReaderUtils.segmentReader(reader.reader()).ramBytesUsed();
}
@Override
public SegmentsStats segmentsStats() {
rwl.readLock().lock();
try {
ensureOpen();
Searcher searcher = acquireSearcher("segments_stats");
try {
SegmentsStats stats = new SegmentsStats();
for (AtomicReaderContext reader : searcher.reader().leaves()) {
stats.add(1, getReaderRamBytesUsed(reader));
}
return stats;
} finally {
searcher.release();
}
} finally {
rwl.readLock().unlock();
}
}
@Override
public List<Segment> segments() {
rwl.readLock().lock();
try {
ensureOpen();
Map<String, Segment> segments = new HashMap<String, Segment>();
// first, go over and compute the search ones...
Searcher searcher = acquireSearcher("segments");
try {
for (AtomicReaderContext reader : searcher.reader().leaves()) {
assert reader.reader() instanceof SegmentReader;
SegmentCommitInfo info = SegmentReaderUtils.segmentReader(reader.reader()).getSegmentInfo();
assert !segments.containsKey(info.info.name);
Segment segment = new Segment(info.info.name);
segment.search = true;
segment.docCount = reader.reader().numDocs();
segment.delDocCount = reader.reader().numDeletedDocs();
segment.version = info.info.getVersion();
segment.compound = info.info.getUseCompoundFile();
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
logger.trace("failed to get size for [{}]", e, info.info.name);
}
segment.memoryInBytes = getReaderRamBytesUsed(reader);
segments.put(info.info.name, segment);
}
} finally {
searcher.release();
}
// now, correlate or add the committed ones...
if (lastCommittedSegmentInfos != null) {
SegmentInfos infos = lastCommittedSegmentInfos;
for (SegmentCommitInfo info : infos) {
Segment segment = segments.get(info.info.name);
if (segment == null) {
segment = new Segment(info.info.name);
segment.search = false;
segment.committed = true;
segment.docCount = info.info.getDocCount();
segment.delDocCount = info.getDelCount();
segment.version = info.info.getVersion();
segment.compound = info.info.getUseCompoundFile();
try {
segment.sizeInBytes = info.sizeInBytes();
} catch (IOException e) {
logger.trace("failed to get size for [{}]", e, info.info.name);
}
segments.put(info.info.name, segment);
} else {
segment.committed = true;
}
}
}
Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
Arrays.sort(segmentsArr, new Comparator<Segment>() {
@Override
public int compare(Segment o1, Segment o2) {
return Long.compare(o1.getGeneration(), o2.getGeneration()); // avoid int overflow on large generation gaps
}
});
// fill in the merges flag
Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
for (OnGoingMerge onGoingMerge : onGoingMerges) {
for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
for (Segment segment : segmentsArr) {
if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
segment.mergeId = onGoingMerge.getId();
break;
}
}
}
}
return Arrays.asList(segmentsArr);
} finally {
rwl.readLock().unlock();
}
}
@Override
public void close() throws ElasticsearchException {
rwl.writeLock().lock();
try {
innerClose();
} finally {
rwl.writeLock().unlock();
}
try {
// wait for recoveries to join and close all resources / IO streams
int ongoingRecoveries = onGoingRecoveries.awaitNoRecoveries(5000);
if (ongoingRecoveries > 0) {
logger.debug("Waiting for ongoing recoveries timed out on close currently ongoing disoveries: [{}]", ongoingRecoveries);
}
} catch (InterruptedException e) {
// ignore & restore interrupt
Thread.currentThread().interrupt();
}
}
class FailEngineOnMergeFailure implements MergeSchedulerProvider.FailureListener {
@Override
public void onFailedMerge(MergePolicy.MergeException e) {
failEngine(e);
}
}
private void failEngine(Throwable failure) {
synchronized (failedEngineMutex) {
if (failedEngine != null) {
return;
}
logger.warn("failed engine", failure);
failedEngine = failure;
for (FailedEngineListener listener : failedEngineListeners) {
listener.onFailedEngine(shardId, failure);
}
innerClose();
}
}
private void innerClose() {
if (closed) {
return;
}
indexSettingsService.removeListener(applySettings);
closed = true;
this.versionMap.clear();
this.failedEngineListeners.clear();
try {
try {
IOUtils.close(searcherManager);
} catch (Throwable t) {
logger.warn("Failed to close SearcherManager", t);
}
// no need to commit in this case: we snapshot before we close the shard, so the translog and index are already sync'ed
if (indexWriter != null) {
try {
indexWriter.rollback();
} catch (AlreadyClosedException e) {
// ignore
}
}
} catch (Throwable e) {
logger.debug("failed to rollback writer on close", e);
} finally {
indexWriter = null;
}
}
private HashedBytesRef versionKey(Term uid) {
return new HashedBytesRef(uid.bytes());
}
private Object dirtyLock(BytesRef uid) {
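// stripe the per-uid locks: hash the uid into a fixed pool of monitors so concurrent
// operations on different documents rarely contend, while the same uid always maps to one lock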
int hash = DjbHashFunction.DJB_HASH(uid.bytes, uid.offset, uid.length);
// Math.abs(Integer.MIN_VALUE) returns Integer.MIN_VALUE, so we need to protect against it...
if (hash == Integer.MIN_VALUE) {
hash = 0;
}
return dirtyLocks[Math.abs(hash) % dirtyLocks.length];
}
private Object dirtyLock(Term uid) {
return dirtyLock(uid.bytes());
}
private long loadCurrentVersionFromIndex(Term uid) throws IOException {
Searcher searcher = acquireSearcher("load_version");
try {
return Versions.loadVersion(searcher.reader(), uid);
} finally {
searcher.release();
}
}
/**
* Returns whether a leaf reader comes from a merge (versus flush or addIndexes).
*/
private static boolean isMergedSegment(AtomicReader reader) {
// We expect leaves to be segment readers
final Map<String, String> diagnostics = SegmentReaderUtils.segmentReader(reader).getSegmentInfo().info.getDiagnostics();
final String source = diagnostics.get(IndexWriter.SOURCE);
assert Arrays.asList(IndexWriter.SOURCE_ADDINDEXES_READERS, IndexWriter.SOURCE_FLUSH, IndexWriter.SOURCE_MERGE).contains(source) : "Unknown source " + source;
return IndexWriter.SOURCE_MERGE.equals(source);
}
private IndexWriter createWriter() throws IOException {
try {
// release locks when started
if (IndexWriter.isLocked(store.directory())) {
logger.warn("shard is locked, releasing lock");
IndexWriter.unlock(store.directory());
}
boolean create = !Lucene.indexExists(store.directory());
IndexWriterConfig config = new IndexWriterConfig(Lucene.VERSION, analysisService.defaultIndexAnalyzer());
config.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
config.setIndexDeletionPolicy(deletionPolicy);
config.setMergeScheduler(mergeScheduler.newMergeScheduler());
MergePolicy mergePolicy = mergePolicyProvider.newMergePolicy();
// Give us the opportunity to upgrade old segments while performing
// background merges
mergePolicy = new IndexUpgraderMergePolicy(mergePolicy);
config.setMergePolicy(mergePolicy);
config.setSimilarity(similarityService.similarity());
config.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
config.setMaxThreadStates(indexConcurrency);
config.setCodec(codecService.codec(codecName));
/* We set this timeout to a highish value to work around
* the default poll interval in the Lucene lock that is
* 1000ms by default. We might need to poll multiple times
* here but with 1s poll this is only executed twice at most
* in combination with the default writelock timeout*/
config.setWriteLockTimeout(5000);
config.setUseCompoundFile(this.compoundOnFlush);
// Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
// of the merge operation and won't slow down _refresh
config.setMergedSegmentWarmer(new IndexReaderWarmer() {
@Override
public void warm(AtomicReader reader) throws IOException {
try {
assert isMergedSegment(reader);
final Engine.Searcher searcher = new SimpleSearcher("warmer", new IndexSearcher(reader));
final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
if (warmer != null) warmer.warm(context);
} catch (Throwable t) {
// Don't fail a merge if the warm-up failed
if (!closed) {
logger.warn("Warm-up failed", t);
}
if (t instanceof Error) {
// assertion/out-of-memory error, don't ignore those
throw (Error) t;
}
}
}
});
return new IndexWriter(store.directory(), config);
} catch (LockObtainFailedException ex) {
boolean isLocked = IndexWriter.isLocked(store.directory());
logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
throw ex;
}
}
public static final String INDEX_INDEX_CONCURRENCY = "index.index_concurrency";
public static final String INDEX_COMPOUND_ON_FLUSH = "index.compound_on_flush";
public static final String INDEX_GC_DELETES = "index.gc_deletes";
public static final String INDEX_FAIL_ON_MERGE_FAILURE = "index.fail_on_merge_failure";
class ApplySettings implements IndexSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
long gcDeletesInMillis = settings.getAsTime(INDEX_GC_DELETES, TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis)).millis();
if (gcDeletesInMillis != InternalEngine.this.gcDeletesInMillis) {
logger.info("updating index.gc_deletes from [{}] to [{}]", TimeValue.timeValueMillis(InternalEngine.this.gcDeletesInMillis), TimeValue.timeValueMillis(gcDeletesInMillis));
InternalEngine.this.gcDeletesInMillis = gcDeletesInMillis;
}
final boolean compoundOnFlush = settings.getAsBoolean(INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush);
if (compoundOnFlush != InternalEngine.this.compoundOnFlush) {
logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_COMPOUND_ON_FLUSH, InternalEngine.this.compoundOnFlush, compoundOnFlush);
InternalEngine.this.compoundOnFlush = compoundOnFlush;
indexWriter.getConfig().setUseCompoundFile(compoundOnFlush);
}
int indexConcurrency = settings.getAsInt(INDEX_INDEX_CONCURRENCY, InternalEngine.this.indexConcurrency);
boolean failOnMergeFailure = settings.getAsBoolean(INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure);
String codecName = settings.get(INDEX_CODEC, InternalEngine.this.codecName);
final boolean codecBloomLoad = settings.getAsBoolean(CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter());
boolean requiresFlushing = false;
if (indexConcurrency != InternalEngine.this.indexConcurrency ||
!codecName.equals(InternalEngine.this.codecName) ||
failOnMergeFailure != InternalEngine.this.failOnMergeFailure ||
codecBloomLoad != codecService.isLoadBloomFilter()) {
rwl.readLock().lock();
try {
if (indexConcurrency != InternalEngine.this.indexConcurrency) {
logger.info("updating index.index_concurrency from [{}] to [{}]", InternalEngine.this.indexConcurrency, indexConcurrency);
InternalEngine.this.indexConcurrency = indexConcurrency;
// we have to flush in this case, since it only applies on a new index writer
requiresFlushing = true;
}
if (!codecName.equals(InternalEngine.this.codecName)) {
logger.info("updating index.codec from [{}] to [{}]", InternalEngine.this.codecName, codecName);
InternalEngine.this.codecName = codecName;
// we want to flush in this case, so the new codec will be reflected right away...
requiresFlushing = true;
}
if (failOnMergeFailure != InternalEngine.this.failOnMergeFailure) {
logger.info("updating {} from [{}] to [{}]", InternalEngine.INDEX_FAIL_ON_MERGE_FAILURE, InternalEngine.this.failOnMergeFailure, failOnMergeFailure);
InternalEngine.this.failOnMergeFailure = failOnMergeFailure;
}
if (codecBloomLoad != codecService.isLoadBloomFilter()) {
logger.info("updating {} from [{}] to [{}]", CodecService.INDEX_CODEC_BLOOM_LOAD, codecService.isLoadBloomFilter(), codecBloomLoad);
codecService.setLoadBloomFilter(codecBloomLoad);
// we need to flush in this case, to load/unload the bloom filters
requiresFlushing = true;
}
} finally {
rwl.readLock().unlock();
}
if (requiresFlushing) {
flush(new Flush().type(Flush.Type.NEW_WRITER));
}
}
}
}
private SearcherManager buildSearchManager(IndexWriter indexWriter) throws IOException {
return new SearcherManager(indexWriter, true, searcherFactory);
}
static class EngineSearcher implements Searcher {
private final String source;
private final IndexSearcher searcher;
private final SearcherManager manager;
private EngineSearcher(String source, IndexSearcher searcher, SearcherManager manager) {
this.source = source;
this.searcher = searcher;
this.manager = manager;
}
@Override
public String source() {
return this.source;
}
@Override
public IndexReader reader() {
return searcher.getIndexReader();
}
@Override
public IndexSearcher searcher() {
return searcher;
}
@Override
public boolean release() throws ElasticsearchException {
try {
manager.release(searcher);
return true;
} catch (IOException e) {
return false;
} catch (AlreadyClosedException e) {
/* this one can happen if we already closed the
* underlying store / directory and we call into the
* IndexWriter to free up pending files. */
return false;
}
}
}
static class VersionValue {
private final long version;
private final boolean delete;
private final long time;
private final Translog.Location translogLocation;
VersionValue(long version, boolean delete, long time, Translog.Location translogLocation) {
this.version = version;
this.delete = delete;
this.time = time;
this.translogLocation = translogLocation;
}
public long time() {
return this.time;
}
public long version() {
return version;
}
public boolean delete() {
return delete;
}
public Translog.Location translogLocation() {
return this.translogLocation;
}
}
class SearchFactory extends SearcherFactory {
@Override
public IndexSearcher newSearcher(IndexReader reader) throws IOException {
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setSimilarity(similarityService.similarity());
if (warmer != null) {
// we need to pass a custom searcher that does not release anything on Engine.Search Release,
// we will release explicitly
Searcher currentSearcher = null;
IndexSearcher newSearcher = null;
boolean closeNewSearcher = false;
try {
if (searcherManager == null) {
// fresh index writer, so warm all of its readers
newSearcher = searcher;
} else {
currentSearcher = acquireSearcher("search_factory");
// figure out the newSearcher, with only the new readers that are relevant for us
List<IndexReader> readers = Lists.newArrayList();
for (AtomicReaderContext newReaderContext : searcher.getIndexReader().leaves()) {
if (isMergedSegment(newReaderContext.reader())) {
// merged segments are already handled by IndexWriterConfig.setMergedSegmentWarmer
continue;
}
boolean found = false;
for (AtomicReaderContext currentReaderContext : currentSearcher.reader().leaves()) {
if (currentReaderContext.reader().getCoreCacheKey().equals(newReaderContext.reader().getCoreCacheKey())) {
found = true;
break;
}
}
if (!found) {
readers.add(newReaderContext.reader());
}
}
if (!readers.isEmpty()) {
// we don't want to close the inner readers, just increase ref on them
newSearcher = new IndexSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()]), false));
closeNewSearcher = true;
}
}
if (newSearcher != null) {
IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId,
new SimpleSearcher("warmer", newSearcher));
warmer.warm(context);
}
} catch (Throwable e) {
if (!closed) {
logger.warn("failed to prepare/warm", e);
}
} finally {
// no need to release the incoming searcher itself; we only clean up what we acquired here
if (currentSearcher != null) {
currentSearcher.release();
}
if (newSearcher != null && closeNewSearcher) {
IOUtils.closeWhileHandlingException(newSearcher.getIndexReader()); // ignore
}
}
}
return searcher;
}
}
private static final class RecoveryCounter {
private volatile int ongoingRecoveries = 0;
synchronized void increment() {
ongoingRecoveries++;
}
synchronized void decrement() {
ongoingRecoveries--;
if (ongoingRecoveries == 0) {
notifyAll(); // notify waiting threads - we only wait on ongoingRecoveries == 0
}
assert ongoingRecoveries >= 0 : "ongoingRecoveries must be >= 0 but was: " + ongoingRecoveries;
}
int get() {
// volatile read - no sync needed
return ongoingRecoveries;
}
synchronized int awaitNoRecoveries(long timeout) throws InterruptedException {
if (ongoingRecoveries > 0) { // no loop here - we either time out or we are done!
wait(timeout);
}
return ongoingRecoveries;
}
}
}
| 1no label | src_main_java_org_elasticsearch_index_engine_internal_InternalEngine.java |
| 302 |
static class DumEntryListener implements EntryListener {
public void entryAdded(EntryEvent event) {
}
public void entryRemoved(EntryEvent event) {
}
public void entryUpdated(EntryEvent event) {
}
public void entryEvicted(EntryEvent event) {
}
}
| 0true | hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapBasicTest.java |
| 704 |
shardBulkAction.execute(bulkShardRequest, new ActionListener<BulkShardResponse>() {
@Override
public void onResponse(BulkShardResponse bulkShardResponse) {
for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) {
responses.set(bulkItemResponse.getItemId(), bulkItemResponse);
}
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
@Override
public void onFailure(Throwable e) {
// create failures for all relevant requests
String message = ExceptionsHelper.detailedMessage(e);
RestStatus status = ExceptionsHelper.status(e);
for (BulkItemRequest request : requests) {
if (request.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), message, status)));
} else if (request.request() instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), message, status)));
} else if (request.request() instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "update",
new BulkItemResponse.Failure(updateRequest.index(), updateRequest.type(), updateRequest.id(), message, status)));
}
}
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
private void finishHim() {
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), System.currentTimeMillis() - startTime));
}
});
| 0true | src_main_java_org_elasticsearch_action_bulk_TransportBulkAction.java |
| 1,149 |
public class GeoDistanceSearchBenchmark {
public static void main(String[] args) throws Exception {
Node node = NodeBuilder.nodeBuilder().clusterName(GeoDistanceSearchBenchmark.class.getSimpleName()).node();
Client client = node.client();
ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
if (clusterHealthResponse.isTimedOut()) {
System.err.println("Failed to wait for green status, bailing");
System.exit(1);
}
final long NUM_DOCS = SizeValue.parseSizeValue("1m").singles();
final long NUM_WARM = 50;
final long NUM_RUNS = 100;
if (client.admin().indices().prepareExists("test").execute().actionGet().isExists()) {
System.out.println("Found an index, count: " + client.prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount());
} else {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
.endObject().endObject().string();
client.admin().indices().prepareCreate("test")
.setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
.addMapping("type1", mapping)
.execute().actionGet();
System.err.println("--> Indexing [" + NUM_DOCS + "]");
for (long i = 0; i < NUM_DOCS; ) {
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "New York")
.startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
.endObject()).execute().actionGet();
// to NY: 5.286 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Times Square")
.startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
.endObject()).execute().actionGet();
// to NY: 0.4621 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Tribeca")
.startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
.endObject()).execute().actionGet();
// to NY: 1.258 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Soho")
.startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
.endObject()).execute().actionGet();
// to NY: 8.572 km
client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
.field("name", "Brooklyn")
.startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
.endObject()).execute().actionGet();
if ((i % 10000) == 0) {
System.err.println("--> indexed " + i);
}
}
System.err.println("Done indexed");
client.admin().indices().prepareFlush("test").execute().actionGet();
client.admin().indices().prepareRefresh().execute().actionGet();
}
System.err.println("--> Warming up (ARC) - optimize_bbox");
long start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.ARC, "memory");
}
long totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (ARC) - optimize_bbox (memory) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (ARC) - optimize_bbox (memory)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.ARC, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (ARC) - optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (ARC) - optimize_bbox (indexed)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.ARC, "indexed");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (ARC) - optimize_bbox (indexed)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.ARC, "indexed");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (ARC) - no optimize_bbox");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.ARC, "none");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (ARC) - no optimize_bbox " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (ARC) - no optimize_bbox");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.ARC, "none");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (ARC) - no optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (SLOPPY_ARC)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.SLOPPY_ARC, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (SLOPPY_ARC) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (SLOPPY_ARC)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.SLOPPY_ARC, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (SLOPPY_ARC) " + (totalTime / NUM_RUNS) + "ms");
System.err.println("--> Warming up (PLANE)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_WARM; i++) {
run(client, GeoDistance.PLANE, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Warmup (PLANE) " + (totalTime / NUM_WARM) + "ms");
System.err.println("--> Perf (PLANE)");
start = System.currentTimeMillis();
for (int i = 0; i < NUM_RUNS; i++) {
run(client, GeoDistance.PLANE, "memory");
}
totalTime = System.currentTimeMillis() - start;
System.err.println("--> Perf (PLANE) " + (totalTime / NUM_RUNS) + "ms");
node.close();
}
public static void run(Client client, GeoDistance geoDistance, String optimizeBbox) {
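// a count-only search from the NY point: filter docs within 2km while varying the distance
// algorithm and the bounding-box optimization under test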
client.prepareSearch() // from NY
.setSearchType(SearchType.COUNT)
.setQuery(filteredQuery(matchAllQuery(), geoDistanceFilter("location")
.distance("2km")
.optimizeBbox(optimizeBbox)
.geoDistance(geoDistance)
.point(40.7143528, -74.0059731)))
.execute().actionGet();
}
}
| 0true | src_test_java_org_elasticsearch_benchmark_search_geo_GeoDistanceSearchBenchmark.java |
| 397 |
public class TransportClusterSearchShardsAction extends TransportMasterNodeReadOperationAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse> {
@Inject
public TransportClusterSearchShardsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, transportService, clusterService, threadPool);
}
@Override
protected String transportAction() {
return ClusterSearchShardsAction.NAME;
}
@Override
protected String executor() {
// all in memory work here...
return ThreadPool.Names.SAME;
}
@Override
protected ClusterSearchShardsRequest newRequest() {
return new ClusterSearchShardsRequest();
}
@Override
protected ClusterSearchShardsResponse newResponse() {
return new ClusterSearchShardsResponse();
}
@Override
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) throws ElasticsearchException {
ClusterState clusterState = state; // use the state handed to masterOperation instead of re-reading it
String[] concreteIndices = clusterState.metaData().concreteIndices(request.indices(), request.indicesOptions());
Map<String, Set<String>> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices());
Set<String> nodeIds = newHashSet();
GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, routingMap, request.preference());
ShardRouting shard;
ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
int currentGroup = 0;
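// flatten each shard iterator into a routing array per (index, shard) group, collecting the
// ids of every node that hosts a copy so the response can ship the node metadata once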
for (ShardIterator shardIt : groupShardsIterator) {
String index = shardIt.shardId().getIndex();
int shardId = shardIt.shardId().getId();
ShardRouting[] shardRoutings = new ShardRouting[shardIt.size()];
int currentShard = 0;
shardIt.reset();
while ((shard = shardIt.nextOrNull()) != null) {
shardRoutings[currentShard++] = shard;
nodeIds.add(shard.currentNodeId());
}
groupResponses[currentGroup++] = new ClusterSearchShardsGroup(index, shardId, shardRoutings);
}
DiscoveryNode[] nodes = new DiscoveryNode[nodeIds.size()];
int currentNode = 0;
for (String nodeId : nodeIds) {
nodes[currentNode++] = clusterState.getNodes().get(nodeId);
}
listener.onResponse(new ClusterSearchShardsResponse(groupResponses, nodes));
}
}
| 1no label | src_main_java_org_elasticsearch_action_admin_cluster_shards_TransportClusterSearchShardsAction.java |
| 10 |
private class MessageReceiver
extends SimpleChannelHandler
{
@Override
public void channelOpen( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
{
Channel ctxChannel = ctx.getChannel();
openedChannel( getURI( (InetSocketAddress) ctxChannel.getRemoteAddress() ), ctxChannel );
channels.add( ctxChannel );
}
@Override
public void messageReceived( ChannelHandlerContext ctx, MessageEvent event ) throws Exception
{
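// the first received message reveals which local address the channel actually bound to,
// so we publish the listening URI exactly once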
if (!bindingDetected)
{
InetSocketAddress local = ((InetSocketAddress)event.getChannel().getLocalAddress());
bindingDetected = true;
listeningAt( getURI( local ) );
}
final Message message = (Message) event.getMessage();
// Fix FROM header since sender cannot know it's correct IP/hostname
InetSocketAddress remote = (InetSocketAddress) ctx.getChannel().getRemoteAddress();
String remoteAddress = remote.getAddress().getHostAddress();
URI fromHeader = URI.create( message.getHeader( Message.FROM ) );
fromHeader = URI.create(fromHeader.getScheme()+"://"+remoteAddress + ":" + fromHeader.getPort());
message.setHeader( Message.FROM, fromHeader.toASCIIString() );
msgLog.debug( "Received:" + message );
receive( message );
}
@Override
public void channelDisconnected( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
{
closedChannel( getURI( (InetSocketAddress) ctx.getChannel().getRemoteAddress() ) );
}
@Override
public void channelClosed( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
{
closedChannel( getURI( (InetSocketAddress) ctx.getChannel().getRemoteAddress() ) );
channels.remove( ctx.getChannel() );
}
@Override
public void exceptionCaught( ChannelHandlerContext ctx, ExceptionEvent e ) throws Exception
{
if ( !(e.getCause() instanceof ConnectException) )
{
msgLog.error( "Receive exception:", e.getCause() );
}
}
}
| 1no label | enterprise_cluster_src_main_java_org_neo4j_cluster_com_NetworkReceiver.java |
| 32 |
static final class ThenCompose<T,U> extends Completion {
final CompletableFuture<? extends T> src;
final Fun<? super T, CompletableFuture<U>> fn;
final CompletableFuture<U> dst;
final Executor executor;
ThenCompose(CompletableFuture<? extends T> src,
Fun<? super T, CompletableFuture<U>> fn,
CompletableFuture<U> dst,
Executor executor) {
this.src = src; this.fn = fn; this.dst = dst;
this.executor = executor;
}
public final void run() {
final CompletableFuture<? extends T> a;
final Fun<? super T, CompletableFuture<U>> fn;
final CompletableFuture<U> dst;
Object r; T t; Throwable ex; Executor e;
if ((dst = this.dst) != null &&
(fn = this.fn) != null &&
(a = this.src) != null &&
(r = a.result) != null &&
compareAndSet(0, 1)) {
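// we won the CAS, so this completion runs at most once; unpack the source's
// result into either a plain value or an exception before composing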
if (r instanceof AltResult) {
ex = ((AltResult)r).ex;
t = null;
}
else {
ex = null;
@SuppressWarnings("unchecked") T tr = (T) r;
t = tr;
}
CompletableFuture<U> c = null;
U u = null;
boolean complete = false;
if (ex == null) {
if ((e = executor) != null)
e.execute(new AsyncCompose<T,U>(t, fn, dst));
else {
try {
if ((c = fn.apply(t)) == null)
ex = new NullPointerException();
} catch (Throwable rex) {
ex = rex;
}
}
}
if (c != null) {
ThenCopy<U> d = null;
Object s;
if ((s = c.result) == null) {
CompletionNode p = new CompletionNode
(d = new ThenCopy<U>(c, dst));
while ((s = c.result) == null) {
if (UNSAFE.compareAndSwapObject
(c, COMPLETIONS, p.next = c.completions, p))
break;
}
}
if (s != null && (d == null || d.compareAndSet(0, 1))) {
complete = true;
if (s instanceof AltResult) {
ex = ((AltResult)s).ex; // no rewrap
u = null;
}
else {
@SuppressWarnings("unchecked") U us = (U) s;
u = us;
}
}
}
if (complete || ex != null)
dst.internalComplete(u, ex);
if (c != null)
c.helpPostComplete();
}
}
private static final long serialVersionUID = 5232453952276885070L;
}
| 0true | src_main_java_jsr166e_CompletableFuture.java |
| 25 |
return Iterables.transform(Iterables.filter(outEdges, new Predicate<Edge>() {
@Override
public boolean apply(@Nullable Edge edge) {
return !CHECK_VALUE || ((Integer) edge.getProperty("number")).intValue() == value;
}
}), new Function<Edge, Vertex>() {
    @Override
    public Vertex apply(@Nullable Edge edge) {
        // completion of the truncated snippet: map each matching out-edge to its head vertex
        // (Direction.IN is an assumption; the original body was cut off)
        return edge.getVertex(Direction.IN);
    }
});
| 0true | titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java |
| 654 |
public class PutIndexTemplateAction extends IndicesAction<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {
public static final PutIndexTemplateAction INSTANCE = new PutIndexTemplateAction();
public static final String NAME = "indices/template/put";
private PutIndexTemplateAction() {
super(NAME);
}
@Override
public PutIndexTemplateResponse newResponse() {
return new PutIndexTemplateResponse();
}
@Override
public PutIndexTemplateRequestBuilder newRequestBuilder(IndicesAdminClient client) {
return new PutIndexTemplateRequestBuilder(client);
}
}
| 0true | src_main_java_org_elasticsearch_action_admin_indices_template_put_PutIndexTemplateAction.java |
| 1,869 |
@Component("blAdminRequestProcessor")
public class BroadleafAdminRequestProcessor extends AbstractBroadleafWebRequestProcessor {
protected final Log LOG = LogFactory.getLog(getClass());
@Resource(name = "blSiteResolver")
protected BroadleafSiteResolver siteResolver;
@Resource(name = "messageSource")
protected MessageSource messageSource;
@Resource(name = "blLocaleResolver")
protected BroadleafLocaleResolver localeResolver;
@Resource(name = "blAdminTimeZoneResolver")
protected BroadleafTimeZoneResolver broadleafTimeZoneResolver;
@Resource(name = "blCurrencyResolver")
protected BroadleafCurrencyResolver currencyResolver;
@Override
public void process(WebRequest request) throws SiteNotFoundException {
Site site = siteResolver.resolveSite(request);
BroadleafRequestContext brc = new BroadleafRequestContext();
BroadleafRequestContext.setBroadleafRequestContext(brc);
brc.setSite(site);
brc.setWebRequest(request);
brc.setIgnoreSite(site == null);
Locale locale = localeResolver.resolveLocale(request);
brc.setLocale(locale);
brc.setMessageSource(messageSource);
TimeZone timeZone = broadleafTimeZoneResolver.resolveTimeZone(request);
brc.setTimeZone(timeZone);
BroadleafCurrency currency = currencyResolver.resolveCurrency(request);
brc.setBroadleafCurrency(currency);
}
@Override
public void postProcess(WebRequest request) {
ThreadLocalManager.remove();
//temporary workaround for Thymeleaf issue #18 (resolved in version 2.1)
//https://github.com/thymeleaf/thymeleaf-spring3/issues/18
try {
Field currentProcessLocale = TemplateEngine.class.getDeclaredField("currentProcessLocale");
currentProcessLocale.setAccessible(true);
((ThreadLocal) currentProcessLocale.get(null)).remove();
Field currentProcessTemplateEngine = TemplateEngine.class.getDeclaredField("currentProcessTemplateEngine");
currentProcessTemplateEngine.setAccessible(true);
((ThreadLocal) currentProcessTemplateEngine.get(null)).remove();
Field currentProcessTemplateName = TemplateEngine.class.getDeclaredField("currentProcessTemplateName");
currentProcessTemplateName.setAccessible(true);
((ThreadLocal) currentProcessTemplateName.get(null)).remove();
} catch (Throwable e) {
LOG.warn("Unable to remove Thymeleaf threadlocal variables from request thread", e);
}
}
}
| 1no label | admin_broadleaf-open-admin-platform_src_main_java_org_broadleafcommerce_openadmin_web_filter_BroadleafAdminRequestProcessor.java |
| 31 |
public class HBaseBlueprintsTest extends TitanBlueprintsTest {
@Override
public void beforeSuite() {
try {
HBaseStorageSetup.startHBase();
} catch (IOException e) {
throw new AssertionError(e);
}
}
@AfterClass
public static void stopHBase() {
// Workaround for https://issues.apache.org/jira/browse/HBASE-10312
if (VersionInfo.getVersion().startsWith("0.96"))
HBaseStorageSetup.killIfRunning();
}
@Override
public void extraCleanUp(String uid) throws BackendException {
HBaseStoreManager s = new HBaseStoreManager(HBaseStorageSetup.getHBaseConfiguration());
s.clearStorage();
s.close();
}
@Override
public boolean supportsMultipleGraphs() {
return false;
}
@Override
protected TitanGraph openGraph(String uid) {
return TitanFactory.open(HBaseStorageSetup.getHBaseGraphConfiguration());
}
}
| 0true | titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_blueprints_HBaseBlueprintsTest.java |
| 365 |
public class OGraphDatabasePool extends ODatabasePoolBase<OGraphDatabase> {
private static OGraphDatabasePool globalInstance = new OGraphDatabasePool();
public OGraphDatabasePool() {
super();
}
public OGraphDatabasePool(final String iURL, final String iUserName, final String iUserPassword) {
super(iURL, iUserName, iUserPassword);
}
public static OGraphDatabasePool global() {
globalInstance.setup();
return globalInstance;
}
@Override
protected OGraphDatabase createResource(Object owner, String iDatabaseName, Object... iAdditionalArgs) {
return new OGraphDatabasePooled((OGraphDatabasePool) owner, iDatabaseName, (String) iAdditionalArgs[0],
(String) iAdditionalArgs[1]);
}
}
| 0true | core_src_main_java_com_orientechnologies_orient_core_db_graph_OGraphDatabasePool.java |
| 883 |
threadPool.executor(ThreadPool.Names.SEARCH).execute(new Runnable() {
@Override
public void run() {
Tuple<String, Long>[] context1 = scrollId.getContext();
for (int i = 0; i < context1.length; i++) {
Tuple<String, Long> target = context1[i];
DiscoveryNode node = nodes.get(target.v1());
if (node != null && nodes.localNodeId().equals(node.id())) {
executePhase(i, node, target.v2());
}
}
}
});
| 0true | src_main_java_org_elasticsearch_action_search_type_TransportSearchScrollQueryAndFetchAction.java |
| 3,225 |
final class RemoteProvisionTask<K, V>
implements Runnable {
private final AbstractBaseReplicatedRecordStore<K, V> replicatedRecordStore;
private final OperationService operationService;
private final Address callerAddress;
private final int chunkSize;
private ReplicatedRecord[] recordCache;
private int recordCachePos;
RemoteProvisionTask(AbstractBaseReplicatedRecordStore<K, V> replicatedRecordStore, NodeEngine nodeEngine,
Address callerAddress, int chunkSize) {
this.replicatedRecordStore = replicatedRecordStore;
this.operationService = nodeEngine.getOperationService();
this.callerAddress = callerAddress;
this.chunkSize = chunkSize;
}
@Override
public void run() {
recordCache = new ReplicatedRecord[chunkSize];
List<ReplicatedRecord<K, V>> replicatedRecords = new ArrayList<ReplicatedRecord<K, V>>(
replicatedRecordStore.storage.values());
for (int i = 0; i < replicatedRecords.size(); i++) {
ReplicatedRecord<K, V> replicatedRecord = replicatedRecords.get(i);
processReplicatedRecord(replicatedRecord, i == replicatedRecords.size() - 1);
}
}
private void processReplicatedRecord(ReplicatedRecord<K, V> replicatedRecord, boolean finalRecord) {
Object marshalledKey = replicatedRecordStore.marshallKey(replicatedRecord.getKey());
synchronized (replicatedRecordStore.getMutex(marshalledKey)) {
pushReplicatedRecord(replicatedRecord, finalRecord);
}
}
private void pushReplicatedRecord(ReplicatedRecord<K, V> replicatedRecord, boolean finalRecord) {
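// buffer records into fixed-size chunks: a full buffer triggers a send before adding,
// and the final record always forces a flush so the receiver sees a terminating chunk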
if (recordCachePos == chunkSize) {
sendChunk(finalRecord);
}
int hash = replicatedRecord.getLatestUpdateHash();
Object key = replicatedRecordStore.unmarshallKey(replicatedRecord.getKey());
Object value = replicatedRecordStore.unmarshallValue(replicatedRecord.getValue());
VectorClock vectorClock = VectorClock.copyVector(replicatedRecord.getVectorClock());
long ttlMillis = replicatedRecord.getTtlMillis();
recordCache[recordCachePos++] = new ReplicatedRecord(key, value, vectorClock, hash, ttlMillis);
if (finalRecord) {
sendChunk(finalRecord);
}
}
private void sendChunk(boolean finalChunk) {
if (recordCachePos > 0) {
String name = replicatedRecordStore.getName();
Member localMember = replicatedRecordStore.localMember;
Operation operation = new ReplicatedMapInitChunkOperation(name, localMember, recordCache, recordCachePos, finalChunk);
operationService.send(operation, callerAddress);
// Reset chunk cache and pos
recordCache = new ReplicatedRecord[chunkSize];
recordCachePos = 0;
}
}
}
| 1no label | hazelcast_src_main_java_com_hazelcast_replicatedmap_record_RemoteProvisionTask.java |
| 5,326 |
public static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public UnmappedTerms readResult(StreamInput in) throws IOException {
UnmappedTerms buckets = new UnmappedTerms();
buckets.readFrom(in);
return buckets;
}
};
| 1no label | src_main_java_org_elasticsearch_search_aggregations_bucket_terms_UnmappedTerms.java |
| 145 |
@Test
public class LongSerializerTest {
private static final int FIELD_SIZE = 8;
private static final Long OBJECT = 999999999999999999L;
private OLongSerializer longSerializer;
byte[] stream = new byte[FIELD_SIZE];
@BeforeClass
public void beforeClass() {
longSerializer = new OLongSerializer();
}
public void testFieldSize() {
Assert.assertEquals(longSerializer.getObjectSize(null), FIELD_SIZE);
}
public void testSerialize() {
longSerializer.serialize(OBJECT, stream, 0);
Assert.assertEquals(longSerializer.deserialize(stream, 0), OBJECT);
}
public void testSerializeNative() {
longSerializer.serializeNative(OBJECT, stream, 0);
Assert.assertEquals(longSerializer.deserializeNative(stream, 0), OBJECT);
}
public void testNativeDirectMemoryCompatibility() {
longSerializer.serializeNative(OBJECT, stream, 0);
ODirectMemoryPointer pointer = new ODirectMemoryPointer(stream);
try {
Assert.assertEquals(longSerializer.deserializeFromDirectMemory(pointer, 0), OBJECT);
} finally {
pointer.free();
}
}
}
| 0true | commons_src_test_java_com_orientechnologies_common_serialization_types_LongSerializerTest.java |
| 449 |
public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
public ClusterStatsRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterStatsRequest());
}
@Override
protected void doExecute(ActionListener<ClusterStatsResponse> listener) {
((ClusterAdminClient) client).clusterStats(request, listener);
}
}
| 0true | src_main_java_org_elasticsearch_action_admin_cluster_stats_ClusterStatsRequestBuilder.java |
| 5,865 |
public class SourceLookup implements Map {
private AtomicReader reader;
private int docId = -1;
private BytesReference sourceAsBytes;
private Map<String, Object> source;
private XContentType sourceContentType;
public Map<String, Object> source() {
return source;
}
public XContentType sourceContentType() {
return sourceContentType;
}
private Map<String, Object> loadSourceIfNeeded() {
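// lazily materialize _source: prefer an already-set map, then parse the raw bytes,
// and only as a last resort load the stored field from the Lucene reader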
if (source != null) {
return source;
}
if (sourceAsBytes != null) {
Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(sourceAsBytes);
sourceContentType = tuple.v1();
source = tuple.v2();
return source;
}
try {
JustSourceFieldsVisitor sourceFieldVisitor = new JustSourceFieldsVisitor();
reader.document(docId, sourceFieldVisitor);
BytesReference source = sourceFieldVisitor.source();
if (source == null) {
this.source = ImmutableMap.of();
this.sourceContentType = null;
} else {
Tuple<XContentType, Map<String, Object>> tuple = sourceAsMapAndType(source);
this.sourceContentType = tuple.v1();
this.source = tuple.v2();
}
} catch (Exception e) {
throw new ElasticsearchParseException("failed to parse / load source", e);
}
return this.source;
}
public static Tuple<XContentType, Map<String, Object>> sourceAsMapAndType(BytesReference source) throws ElasticsearchParseException {
return XContentHelper.convertToMap(source, false);
}
public static Map<String, Object> sourceAsMap(BytesReference source) throws ElasticsearchParseException {
return sourceAsMapAndType(source).v2();
}
public static Tuple<XContentType, Map<String, Object>> sourceAsMapAndType(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
return XContentHelper.convertToMap(bytes, offset, length, false);
}
public static Map<String, Object> sourceAsMap(byte[] bytes, int offset, int length) throws ElasticsearchParseException {
return sourceAsMapAndType(bytes, offset, length).v2();
}
public void setNextReader(AtomicReaderContext context) {
if (this.reader == context.reader()) { // if we are called with the same reader, don't invalidate source
return;
}
this.reader = context.reader();
this.source = null;
this.sourceAsBytes = null;
this.docId = -1;
}
public void setNextDocId(int docId) {
if (this.docId == docId) { // if we are called with the same docId, don't invalidate source
return;
}
this.docId = docId;
this.sourceAsBytes = null;
this.source = null;
}
public void setNextSource(BytesReference source) {
this.sourceAsBytes = source;
}
public void setNextSource(Map<String, Object> source) {
this.source = source;
}
/**
* Internal source representation, might be compressed....
*/
public BytesReference internalSourceRef() {
return sourceAsBytes;
}
/**
* Returns the values associated with the path. Those are "low" level values, and it can
* handle path expression where an array/list is navigated within.
*/
public List<Object> extractRawValues(String path) {
return XContentMapValues.extractRawValues(path, loadSourceIfNeeded());
}
public Object filter(String[] includes, String[] excludes) {
return XContentMapValues.filter(loadSourceIfNeeded(), includes, excludes);
}
public Object extractValue(String path) {
return XContentMapValues.extractValue(path, loadSourceIfNeeded());
}
@Override
public Object get(Object key) {
return loadSourceIfNeeded().get(key);
}
@Override
public int size() {
return loadSourceIfNeeded().size();
}
@Override
public boolean isEmpty() {
return loadSourceIfNeeded().isEmpty();
}
@Override
public boolean containsKey(Object key) {
return loadSourceIfNeeded().containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return loadSourceIfNeeded().containsValue(value);
}
@Override
public Set keySet() {
return loadSourceIfNeeded().keySet();
}
@Override
public Collection values() {
return loadSourceIfNeeded().values();
}
@Override
public Set entrySet() {
return loadSourceIfNeeded().entrySet();
}
@Override
public Object put(Object key, Object value) {
throw new UnsupportedOperationException();
}
@Override
public Object remove(Object key) {
throw new UnsupportedOperationException();
}
@Override
public void putAll(Map m) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
}
| 1no label | src_main_java_org_elasticsearch_search_lookup_SourceLookup.java |
| 299 |
new Thread() {
public void run() {
assertTrue(l.isLocked());
assertFalse(l.isLockedByCurrentThread());
assertEquals(1, l.getLockCount());
assertTrue(l.getRemainingLeaseTime() > 1000 * 30);
latch.countDown();
}
}.start();
| 0true | hazelcast-client_src_test_java_com_hazelcast_client_lock_ClientLockTest.java |
| 5,368 |
public class InternalStats extends MetricsAggregation.MultiValue implements Stats {
public final static Type TYPE = new Type("stats");
public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
@Override
public InternalStats readResult(StreamInput in) throws IOException {
InternalStats result = new InternalStats();
result.readFrom(in);
return result;
}
};
public static void registerStreams() {
AggregationStreams.registerStream(STREAM, TYPE.stream());
}
enum Metrics {
count, sum, min, max, avg;
public static Metrics resolve(String name) {
return Metrics.valueOf(name);
}
}
protected long count;
protected double min;
protected double max;
protected double sum;
protected InternalStats() {} // for serialization
public InternalStats(String name, long count, double sum, double min, double max) {
super(name);
this.count = count;
this.sum = sum;
this.min = min;
this.max = max;
}
@Override
public long getCount() {
return count;
}
@Override
public double getMin() {
return min;
}
@Override
public double getMax() {
return max;
}
@Override
public double getAvg() {
return sum / count;
}
@Override
public double getSum() {
return sum;
}
@Override
public Type type() {
return TYPE;
}
@Override
public double value(String name) {
Metrics metrics = Metrics.valueOf(name);
switch (metrics) {
case min: return this.min;
case max: return this.max;
case avg: return this.getAvg();
case count: return this.count;
case sum: return this.sum;
default:
throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
}
}
@Override
public InternalStats reduce(ReduceContext reduceContext) {
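// merge per-shard stats: use the first non-empty result as the accumulator and fold the
// other non-empty ones into it, so empty shards cannot skew min/max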
List<InternalAggregation> aggregations = reduceContext.aggregations();
if (aggregations.size() == 1) {
return (InternalStats) aggregations.get(0);
}
InternalStats reduced = null;
for (InternalAggregation aggregation : aggregations) {
if (reduced == null) {
if (((InternalStats) aggregation).count != 0) {
reduced = (InternalStats) aggregation;
}
} else {
if (((InternalStats) aggregation).count != 0) {
reduced.count += ((InternalStats) aggregation).count;
reduced.min = Math.min(reduced.min, ((InternalStats) aggregation).min);
reduced.max = Math.max(reduced.max, ((InternalStats) aggregation).max);
reduced.sum += ((InternalStats) aggregation).sum;
mergeOtherStats(reduced, aggregation);
}
}
}
if (reduced != null) {
return reduced;
}
return (InternalStats) aggregations.get(0);
}
protected void mergeOtherStats(InternalStats to, InternalAggregation from) {
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
valueFormatter = ValueFormatterStreams.readOptional(in);
count = in.readVLong();
min = in.readDouble();
max = in.readDouble();
sum = in.readDouble();
readOtherStatsFrom(in);
}
public void readOtherStatsFrom(StreamInput in) throws IOException {
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
ValueFormatterStreams.writeOptional(valueFormatter, out);
out.writeVLong(count);
out.writeDouble(min);
out.writeDouble(max);
out.writeDouble(sum);
writeOtherStatsTo(out);
}
protected void writeOtherStatsTo(StreamOutput out) throws IOException {
}
static class Fields {
public static final XContentBuilderString COUNT = new XContentBuilderString("count");
public static final XContentBuilderString MIN = new XContentBuilderString("min");
public static final XContentBuilderString MIN_AS_STRING = new XContentBuilderString("min_as_string");
public static final XContentBuilderString MAX = new XContentBuilderString("max");
public static final XContentBuilderString MAX_AS_STRING = new XContentBuilderString("max_as_string");
public static final XContentBuilderString AVG = new XContentBuilderString("avg");
public static final XContentBuilderString AVG_AS_STRING = new XContentBuilderString("avg_as_string");
public static final XContentBuilderString SUM = new XContentBuilderString("sum");
public static final XContentBuilderString SUM_AS_STRING = new XContentBuilderString("sum_as_string");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);
builder.field(Fields.COUNT, count);
builder.field(Fields.MIN, count != 0 ? min : null);
builder.field(Fields.MAX, count != 0 ? max : null);
builder.field(Fields.AVG, count != 0 ? getAvg() : null);
builder.field(Fields.SUM, count != 0 ? sum : null);
if (count != 0 && valueFormatter != null) {
builder.field(Fields.MIN_AS_STRING, valueFormatter.format(min));
builder.field(Fields.MAX_AS_STRING, valueFormatter.format(max));
builder.field(Fields.AVG_AS_STRING, valueFormatter.format(getAvg()));
builder.field(Fields.SUM_AS_STRING, valueFormatter.format(sum));
}
otherStatsToXContent(builder, params);
builder.endObject();
return builder;
}
protected XContentBuilder otherStatsToXContent(XContentBuilder builder, Params params) throws IOException {
return builder;
}
}
| 1no label | src_main_java_org_elasticsearch_search_aggregations_metrics_stats_InternalStats.java |
| 774 |
public class CollectionReserveRemoveOperation extends CollectionOperation {
String transactionId;
private Data value;
private long reservedItemId = -1;
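// reserves an item for a transactional remove: the reservation id and transaction id tie this
// operation to the surrounding transaction's later commit or rollback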
public CollectionReserveRemoveOperation() {
}
public CollectionReserveRemoveOperation(String name, long reservedItemId, Data value, String transactionId) {
super(name);
this.reservedItemId = reservedItemId;
this.value = value;
this.transactionId = transactionId;
}
@Override
public int getId() {
return CollectionDataSerializerHook.COLLECTION_RESERVE_REMOVE;
}
@Override
public void beforeRun() throws Exception {
}
@Override
public void run() throws Exception {
response = getOrCreateContainer().reserveRemove(reservedItemId, value, transactionId);
}
@Override
public void afterRun() throws Exception {
}
@Override
protected void writeInternal(ObjectDataOutput out) throws IOException {
super.writeInternal(out);
out.writeLong(reservedItemId);
value.writeData(out);
out.writeUTF(transactionId);
}
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
super.readInternal(in);
reservedItemId = in.readLong();
value = new Data();
value.readData(in);
transactionId = in.readUTF();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_txn_CollectionReserveRemoveOperation.java
|
1,093 |
public class RecyclerBenchmark {
private static final long NUM_RECYCLES = 5000000L;
private static final Random RANDOM = new Random(0);
private static long bench(final Recycler<?> recycler, long numRecycles, int numThreads) throws InterruptedException {
final AtomicLong recycles = new AtomicLong(numRecycles);
final CountDownLatch latch = new CountDownLatch(1);
final Thread[] threads = new Thread[numThreads];
for (int i = 0; i < numThreads; ++i){
// Thread ids happen to be generated sequentially, so we also create a random number of throwaway threads
// so that the distribution of ids is not perfect for the concurrent recycler
for (int j = RANDOM.nextInt(5); j >= 0; --j) {
new Thread();
}
threads[i] = new Thread() {
@Override
public void run() {
try {
latch.await();
} catch (InterruptedException e) {
return;
}
while (recycles.getAndDecrement() > 0) {
final Recycler.V<?> v = recycler.obtain();
v.release();
}
}
};
}
for (Thread thread : threads) {
thread.start();
}
final long start = System.nanoTime();
latch.countDown();
for (Thread thread : threads) {
thread.join();
}
return System.nanoTime() - start;
}
public static void main(String[] args) throws InterruptedException {
final int limit = 100;
final Recycler.C<Object> c = new Recycler.C<Object>() {
@Override
public Object newInstance(int sizing) {
return new Object();
}
@Override
public void clear(Object value) {}
};
final ImmutableMap<String, Recycler<Object>> recyclers = ImmutableMap.<String, Recycler<Object>>builder()
.put("none", none(c))
.put("concurrent-queue", concurrentDeque(c, limit))
.put("thread-local", threadLocal(dequeFactory(c, limit)))
.put("soft-thread-local", threadLocal(softFactory(dequeFactory(c, limit))))
.put("locked", locked(deque(c, limit)))
.put("concurrent", concurrent(dequeFactory(c, limit), Runtime.getRuntime().availableProcessors()))
.put("soft-concurrent", concurrent(softFactory(dequeFactory(c, limit)), Runtime.getRuntime().availableProcessors())).build();
// warmup
final long start = System.nanoTime();
while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) {
for (Recycler<?> recycler : recyclers.values()) {
bench(recycler, NUM_RECYCLES, 2);
}
}
// run
for (int numThreads = 1; numThreads <= 4 * Runtime.getRuntime().availableProcessors(); numThreads *= 2) {
System.out.println("## " + numThreads + " threads\n");
System.gc();
Thread.sleep(1000);
for (Recycler<?> recycler : recyclers.values()) {
bench(recycler, NUM_RECYCLES, numThreads);
}
for (int i = 0; i < 5; ++i) {
for (Map.Entry<String, Recycler<Object>> entry : recyclers.entrySet()) {
System.out.println(entry.getKey() + "\t" + TimeUnit.NANOSECONDS.toMillis(bench(entry.getValue(), NUM_RECYCLES, numThreads)));
}
System.out.println();
}
}
}
}
| 0true
|
src_test_java_org_elasticsearch_benchmark_common_recycler_RecyclerBenchmark.java
|
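A minimal usage sketch of the Recycler API the benchmark above exercises; only obtain() and release() appear in the benchmark, so the v() value accessor here is an assumption:
static void withPooled(Recycler<StringBuilder> recycler) {
    final Recycler.V<StringBuilder> v = recycler.obtain();
    try {
        v.v().append("work"); // v() assumed to expose the pooled value
    } finally {
        v.release();          // always return the slot, even if the work throws
    }
}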
131 |
class InitializerProposal extends CorrectionProposal {
private final class InitializerValueProposal
implements ICompletionProposal, ICompletionProposalExtension2 {
private final String text;
private final Image image;
private final int offset;
private InitializerValueProposal(int offset, String text, Image image) {
this.offset = offset;
this.text = text;
this.image = image;
}
protected IRegion getCurrentRegion(IDocument document)
throws BadLocationException {
int start = offset;
int length = 0;
for (int i=offset;
i<document.getLength();
i++) {
char ch = document.getChar(i);
if (Character.isWhitespace(ch) ||
ch==';'||ch==','||ch==')') {
break;
}
length++;
}
return new Region(start, length);
}
@Override
public Image getImage() {
return image;
}
@Override
public Point getSelection(IDocument document) {
return new Point(offset + text.length(), 0);
}
public void apply(IDocument document) {
try {
IRegion region = getCurrentRegion(document);
document.replace(region.getOffset(),
region.getLength(), text);
}
catch (BadLocationException e) {
e.printStackTrace();
}
}
public String getDisplayString() {
return text;
}
public String getAdditionalProposalInfo() {
return null;
}
@Override
public IContextInformation getContextInformation() {
return null;
}
@Override
public void apply(ITextViewer viewer, char trigger,
int stateMask, int offset) {
apply(viewer.getDocument());
}
@Override
public void selected(ITextViewer viewer, boolean smartToggle) {}
@Override
public void unselected(ITextViewer viewer) {}
@Override
public boolean validate(IDocument document, int offset,
DocumentEvent event) {
try {
IRegion region = getCurrentRegion(document);
String prefix = document.get(region.getOffset(),
offset-region.getOffset());
return text.startsWith(prefix);
}
catch (BadLocationException e) {
return false;
}
}
}
private CeylonEditor editor;
private final ProducedType type;
private final Scope scope;
private final Unit unit;
private final int exitPos;
InitializerProposal(String name, Change change,
Declaration declaration, ProducedType type,
Region selection, Image image, int exitPos,
CeylonEditor editor) {
super(name, change, selection, image);
this.exitPos = exitPos;
this.editor = editor;
this.scope = declaration.getScope();
this.unit = declaration.getUnit();
this.type = type;
}
InitializerProposal(String name, Change change,
Scope scope, Unit unit, ProducedType type,
Region selection, Image image, int exitPos,
CeylonEditor editor) {
super(name, change, selection, image);
this.exitPos = exitPos;
this.editor = editor;
this.scope = scope;
this.unit = unit;
this.type = type;
}
@Override
public void apply(IDocument document) {
int lenBefore = document.getLength();
super.apply(document);
int lenAfter = document.getLength();
if (editor==null) {
IEditorPart ed = EditorUtil.getCurrentEditor();
if (ed instanceof CeylonEditor) {
editor = (CeylonEditor) ed;
}
}
if (editor!=null) {
Point point = getSelection(document);
if (point.y>0) {
LinkedModeModel linkedModeModel = new LinkedModeModel();
ICompletionProposal[] proposals = getProposals(document, point);
if (proposals.length>1) {
ProposalPosition linkedPosition =
new ProposalPosition(document, point.x, point.y, 0,
proposals);
try {
LinkedMode.addLinkedPosition(linkedModeModel, linkedPosition);
int adjustedExitPos = exitPos;
if (exitPos>=0 && exitPos>point.x) {
adjustedExitPos += lenAfter-lenBefore;
}
int exitSeq = exitPos>=0 ? 1 : NO_STOP;
LinkedMode.installLinkedMode(editor, document, linkedModeModel,
this, new DeleteBlockingExitPolicy(document),
exitSeq, adjustedExitPos);
}
catch (BadLocationException e) {
e.printStackTrace();
}
}
}
}
}
private ICompletionProposal[] getProposals(IDocument document,
Point point) {
List<ICompletionProposal> proposals =
new ArrayList<ICompletionProposal>();
try {
proposals.add(new InitializerValueProposal(point.x,
document.get(point.x, point.y), null));
}
catch (BadLocationException e1) {
e1.printStackTrace();
}
addValueArgumentProposals(point.x, proposals);
return proposals.toArray(new ICompletionProposal[0]);
}
private void addValueArgumentProposals(int loc,
List<ICompletionProposal> props) {
TypeDeclaration td = type.getDeclaration();
for (DeclarationWithProximity dwp:
getSortedProposedValues(scope, unit)) {
if (dwp.isUnimported()) {
//don't propose unimported stuff b/c adding
//imports drops us out of linked mode and
//because it results in a pause
continue;
}
Declaration d = dwp.getDeclaration();
if (d instanceof Value) {
Value value = (Value) d;
if (d.getUnit().getPackage().getNameAsString()
.equals(Module.LANGUAGE_MODULE_NAME)) {
if (isIgnoredLanguageModuleValue(value)) {
continue;
}
}
ProducedType vt = value.getType();
if (vt!=null && !vt.isNothing() &&
((td instanceof TypeParameter) &&
isInBounds(((TypeParameter)td).getSatisfiedTypes(), vt) ||
vt.isSubtypeOf(type))) {
props.add(new InitializerValueProposal(loc, d.getName(),
getImageForDeclaration(d)));
}
}
if (d instanceof Method) {
if (!d.isAnnotation()) {
Method method = (Method) d;
if (d.getUnit().getPackage().getNameAsString()
.equals(Module.LANGUAGE_MODULE_NAME)) {
if (isIgnoredLanguageModuleMethod(method)) {
continue;
}
}
ProducedType mt = method.getType();
if (mt!=null && !mt.isNothing() &&
((td instanceof TypeParameter) &&
isInBounds(((TypeParameter)td).getSatisfiedTypes(), mt) ||
mt.isSubtypeOf(type))) {
StringBuilder sb = new StringBuilder();
sb.append(d.getName());
appendPositionalArgs(d, unit, sb, false, false);
props.add(new InitializerValueProposal(loc, sb.toString(),
getImageForDeclaration(d)));
}
}
}
if (d instanceof Class) {
Class clazz = (Class) d;
if (!clazz.isAbstract() && !d.isAnnotation()) {
if (d.getUnit().getPackage().getNameAsString()
.equals(Module.LANGUAGE_MODULE_NAME)) {
if (isIgnoredLanguageModuleClass(clazz)) {
continue;
}
}
ProducedType ct = clazz.getType();
if (ct!=null && !ct.isNothing() &&
((td instanceof TypeParameter) &&
isInBounds(((TypeParameter)td).getSatisfiedTypes(), ct) ||
ct.getDeclaration().equals(type.getDeclaration()) ||
ct.isSubtypeOf(type))) {
StringBuilder sb = new StringBuilder();
sb.append(d.getName());
appendPositionalArgs(d, unit, sb, false, false);
props.add(new InitializerValueProposal(loc, sb.toString(),
getImageForDeclaration(d)));
}
}
}
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_InitializerProposal.java
|
763 |
shardAction.execute(shardRequest, new ActionListener<MultiGetShardResponse>() {
@Override
public void onResponse(MultiGetShardResponse response) {
for (int i = 0; i < response.locations.size(); i++) {
responses.set(response.locations.get(i), new MultiGetItemResponse(response.responses.get(i), response.failures.get(i)));
}
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
@Override
public void onFailure(Throwable e) {
// create failures for all relevant requests
String message = ExceptionsHelper.detailedMessage(e);
for (int i = 0; i < shardRequest.locations.size(); i++) {
responses.set(shardRequest.locations.get(i), new MultiGetItemResponse(null,
new MultiGetResponse.Failure(shardRequest.index(), shardRequest.types.get(i), shardRequest.ids.get(i), message)));
}
if (counter.decrementAndGet() == 0) {
finishHim();
}
}
private void finishHim() {
listener.onResponse(new MultiGetResponse(responses.toArray(new MultiGetItemResponse[responses.length()])));
}
});
| 0true
|
src_main_java_org_elasticsearch_action_get_TransportMultiGetAction.java
|
1,132 |
public class OSQLMethodAsBoolean extends OAbstractSQLMethod {
public static final String NAME = "asboolean";
public OSQLMethodAsBoolean() {
super(NAME);
}
@Override
public Object execute(OIdentifiable iCurrentRecord, OCommandContext iContext, Object ioResult, Object[] iMethodParams) {
if (ioResult != null) {
if (ioResult instanceof String) {
ioResult = Boolean.valueOf(((String) ioResult).trim());
} else if (ioResult instanceof Number) {
final int bValue = ((Number) ioResult).intValue();
if (bValue == 0) {
ioResult = Boolean.FALSE;
} else if (bValue == 1) {
ioResult = Boolean.TRUE;
} else {
// IGNORE OTHER VALUES
ioResult = null;
}
}
}
return ioResult;
}
}
| 1no label
|
core_src_main_java_com_orientechnologies_orient_core_sql_method_misc_OSQLMethodAsBoolean.java
|
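A standalone mirror of the coercion branches in OSQLMethodAsBoolean above (plain Java, not the OrientDB API), as a quick reference for what asboolean returns:
static Object asBoolean(Object v) {
    if (v instanceof String) {
        return Boolean.valueOf(((String) v).trim()); // "true" (any case) -> TRUE, everything else -> FALSE
    }
    if (v instanceof Number) {
        final int i = ((Number) v).intValue();
        if (i == 0) return Boolean.FALSE;
        if (i == 1) return Boolean.TRUE;
        return null; // other numeric values are ignored, as in the method above
    }
    return v; // null and other types pass through unchanged
}
// asBoolean(" True ") -> TRUE, asBoolean(0) -> FALSE, asBoolean(2) -> null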
603 |
public class GetSettingsResponse extends ActionResponse {
private ImmutableOpenMap<String, Settings> indexToSettings = ImmutableOpenMap.of();
public GetSettingsResponse(ImmutableOpenMap<String, Settings> indexToSettings) {
this.indexToSettings = indexToSettings;
}
GetSettingsResponse() {
}
public ImmutableOpenMap<String, Settings> getIndexToSettings() {
return indexToSettings;
}
public String getSetting(String index, String setting) {
Settings settings = indexToSettings.get(index);
if (settings != null && setting != null) {
return settings.get(setting);
} else {
return null;
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
ImmutableOpenMap.Builder<String, Settings> builder = ImmutableOpenMap.builder();
for (int i = 0; i < size; i++) {
builder.put(in.readString(), ImmutableSettings.readSettingsFromStream(in));
}
indexToSettings = builder.build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(indexToSettings.size());
for (ObjectObjectCursor<String, Settings> cursor : indexToSettings) {
out.writeString(cursor.key);
ImmutableSettings.writeSettingsToStream(cursor.value, out);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_settings_get_GetSettingsResponse.java
|
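A hedged usage sketch of the response above; settingsBuilder() is assumed to be the ImmutableSettings factory (readFrom uses the same class), and the index names are invented:
ImmutableOpenMap.Builder<String, Settings> map = ImmutableOpenMap.builder();
map.put("logs", ImmutableSettings.settingsBuilder().put("index.number_of_replicas", "1").build());
GetSettingsResponse resp = new GetSettingsResponse(map.build());
String replicas = resp.getSetting("logs", "index.number_of_replicas");   // "1"
String missing = resp.getSetting("metrics", "index.number_of_replicas"); // null: unknown index is guarded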
1,619 |
class UpdateTask extends PrioritizedRunnable {
public final String source;
public final ClusterStateUpdateTask updateTask;
public final long addedAt = System.currentTimeMillis();
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
super(priority);
this.source = source;
this.updateTask = updateTask;
}
@Override
public void run() {
if (!lifecycle.started()) {
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
return;
}
logger.debug("processing [{}]: execute", source);
ClusterState previousClusterState = clusterState;
ClusterState newClusterState;
try {
newClusterState = updateTask.execute(previousClusterState);
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("failed to execute cluster state update, state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(previousClusterState.nodes().prettyPrint());
sb.append(previousClusterState.routingTable().prettyPrint());
sb.append(previousClusterState.readOnlyRoutingNodes().prettyPrint());
logger.trace(sb.toString(), e);
}
updateTask.onFailure(source, e);
return;
}
if (previousClusterState == newClusterState) {
logger.debug("processing [{}]: no change in cluster_state", source);
if (updateTask instanceof AckedClusterStateUpdateTask) {
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
return;
}
try {
Discovery.AckListener ackListener = new NoOpAckListener();
if (newClusterState.nodes().localNodeMaster()) {
// only the master controls the version numbers
Builder builder = ClusterState.builder(newClusterState).version(newClusterState.version() + 1);
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1));
}
if (previousClusterState.metaData() != newClusterState.metaData()) {
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
}
newClusterState = builder.build();
if (updateTask instanceof AckedClusterStateUpdateTask) {
final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask;
if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) {
ackedUpdateTask.onAckTimeout();
} else {
try {
ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool);
} catch (EsRejectedExecutionException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
}
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
ackedUpdateTask.onAckTimeout();
}
}
}
} else {
if (previousClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK) && !newClusterState.blocks().hasGlobalBlock(Discovery.NO_MASTER_BLOCK)) {
// force an update, its a fresh update from the master as we transition from a start of not having a master to having one
// have a fresh instances of routing and metadata to remove the chance that version might be the same
Builder builder = ClusterState.builder(newClusterState);
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()));
builder.metaData(MetaData.builder(newClusterState.metaData()));
newClusterState = builder.build();
logger.debug("got first state from fresh master [{}]", newClusterState.nodes().masterNodeId());
} else if (newClusterState.version() < previousClusterState.version()) {
// we got this cluster state from the master, filter out based on versions (don't call listeners)
logger.debug("got old cluster state [" + newClusterState.version() + "<" + previousClusterState.version() + "] from source [" + source + "], ignoring");
return;
}
}
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("cluster state updated:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
logger.trace(sb.toString());
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
}
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
// new cluster state, notify all listeners
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
String summary = nodesDelta.shortSummary();
if (summary.length() > 0) {
logger.info("{}, reason: {}", summary, source);
}
}
// TODO, do this in parallel (and wait)
for (DiscoveryNode node : nodesDelta.addedNodes()) {
if (!nodeRequiresConnection(node)) {
continue;
}
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails
// we don't want to notify
if (newClusterState.nodes().localNodeMaster()) {
logger.debug("publishing cluster state version {}", newClusterState.version());
discoveryService.publish(newClusterState, ackListener);
}
// update the current cluster state
clusterState = newClusterState;
logger.debug("set local cluster state to version {}", newClusterState.version());
for (ClusterStateListener listener : priorityClusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
for (ClusterStateListener listener : clusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
for (ClusterStateListener listener : lastClusterStateListeners) {
listener.clusterChanged(clusterChangedEvent);
}
if (!nodesDelta.removedNodes().isEmpty()) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (DiscoveryNode node : nodesDelta.removedNodes()) {
transportService.disconnectFromNode(node);
}
}
});
}
//manual ack only from the master at the end of the publish
if (newClusterState.nodes().localNodeMaster()) {
try {
ackListener.onNodeAck(localNode(), null);
} catch (Throwable t) {
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
}
}
if (updateTask instanceof ProcessedClusterStateUpdateTask) {
((ProcessedClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newClusterState);
}
logger.debug("processing [{}]: done applying updated cluster_state (version: {})", source, newClusterState.version());
} catch (Throwable t) {
StringBuilder sb = new StringBuilder("failed to apply updated cluster state:\nversion [").append(newClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
logger.warn(sb.toString(), t);
// TODO: do we want to call updateTask.onFailure here?
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_service_InternalClusterService.java
|
25 |
public class DataDTOToMVELTranslatorTest extends TestCase {
private OrderItemFieldServiceImpl orderItemFieldService;
private CustomerFieldServiceImpl customerFieldService;
private OrderFieldServiceImpl orderFieldService;
private FulfillmentGroupFieldServiceImpl fulfillmentGroupFieldService;
@Override
protected void setUp() {
orderItemFieldService = new OrderItemFieldServiceImpl();
orderItemFieldService.init();
customerFieldService = new CustomerFieldServiceImpl();
customerFieldService.init();
orderFieldService = new OrderFieldServiceImpl();
orderFieldService.init();
fulfillmentGroupFieldService = new FulfillmentGroupFieldServiceImpl();
fulfillmentGroupFieldService.init();
}
/**
* Tests the creation of an MVEL expression from a DataDTO
* @throws MVELTranslationException
*
* Here's an example of a DataWrapper with a single DataDTO
*
* [{"id":"100",
* "quantity":"1",
* "groupOperator":"AND",
* "groups":[
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"category.name",
* "operator":"IEQUALS",
* "value":"merchandise"}]
* }]
*/
public void testCreateMVEL() throws MVELTranslationException {
DataDTOToMVELTranslator translator = new DataDTOToMVELTranslator();
ExpressionDTO expressionDTO = new ExpressionDTO();
expressionDTO.setName("category.name");
expressionDTO.setOperator(BLCOperator.IEQUALS.name());
expressionDTO.setValue("merchandise");
String translated = translator.createMVEL("discreteOrderItem", expressionDTO, orderItemFieldService);
String mvel = "MvelHelper.toUpperCase(discreteOrderItem.?category.?name)==MvelHelper.toUpperCase(\"merchandise\")";
assert(mvel.equals(translated));
}
/**
* Tests the creation of a Customer Qualification MVEL expression from a DataDTO
* @throws MVELTranslationException
*
* [{"id":null,
* "quantity":null,
* "groupOperator":"AND",
* "groups":[
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"emailAddress",
* "operator":"NOT_EQUAL_FIELD",
* "value":"username"},
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"deactivated",
* "operator":"EQUALS",
* "value":"true"}]
* }]
*/
public void testCustomerQualificationMVEL() throws MVELTranslationException {
DataDTOToMVELTranslator translator = new DataDTOToMVELTranslator();
DataDTO dataDTO = new DataDTO();
dataDTO.setGroupOperator(BLCOperator.AND.name());
//not currently supported
// ExpressionDTO e1 = new ExpressionDTO();
// e1.setName("emailAddress");
// e1.setOperator(BLCOperator.NOT_EQUAL_FIELD.name());
// e1.setValue("username");
ExpressionDTO e2 = new ExpressionDTO();
e2.setName("deactivated");
e2.setOperator(BLCOperator.EQUALS.name());
e2.setValue("true");
//dataDTO.getGroups().add(e1);
dataDTO.getGroups().add(e2);
String translated = translator.createMVEL("customer", dataDTO, customerFieldService);
String mvel = "customer.?deactivated==true";
assert (mvel.equals(translated));
}
/**
* Tests the creation of an Order Qualification MVEL expression from a DataDTO
* @throws MVELTranslationException
*
* [{"id":null,
* "quantity":null,
* "groupOperator":"AND",
* "groups":[
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"subTotal",
* "operator":"GREATER_OR_EQUAL",
* "value":"100"},
* {"id":null,
* "quantity":null,
* "groupOperator":"OR",
* "groups":[
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"currency.defaultFlag",
* "operator":"EQUALS",
* "value":"true"},
* {"id":null,
* "quantity":null,
* "groupOperator":"null",
* "groups":null,
* "name":"locale.localeCode",
* "operator":"EQUALS",
* "value":"my"}]
* }]
* }]
*/
public void testOrderQualificationMVEL() throws MVELTranslationException {
DataDTOToMVELTranslator translator = new DataDTOToMVELTranslator();
DataDTO dataDTO = new DataDTO();
dataDTO.setGroupOperator(BLCOperator.AND.name());
ExpressionDTO expressionDTO = new ExpressionDTO();
expressionDTO.setName("subTotal");
expressionDTO.setOperator(BLCOperator.GREATER_OR_EQUAL.name());
expressionDTO.setValue("100");
dataDTO.getGroups().add(expressionDTO);
DataDTO d1 = new DataDTO();
d1.setGroupOperator(BLCOperator.OR.name());
ExpressionDTO e1 = new ExpressionDTO();
e1.setName("currency.defaultFlag");
e1.setOperator(BLCOperator.EQUALS.name());
e1.setValue("true");
ExpressionDTO e2 = new ExpressionDTO();
e2.setName("locale.localeCode");
e2.setOperator(BLCOperator.EQUALS.name());
e2.setValue("my");
d1.getGroups().add(e1);
d1.getGroups().add(e2);
dataDTO.getGroups().add(d1);
String translated = translator.createMVEL("order", dataDTO, orderFieldService);
String mvel = "order.?subTotal.getAmount()>=100&&(order.?currency.?defaultFlag==true||order.?locale.?localeCode==\"my\")";
assert (mvel.equals(translated));
}
/**
* Tests the creation of an Item Qualification MVEL expression from a DataDTO
* @throws MVELTranslationException
*
* [{"id":100,
* "quantity":1,
* "groupOperator":"AND",
* "groups":[
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"category.name",
* "operator":"EQUALS",
* "value":"test category"
* }]
* },
* {"id":"200",
* "quantity":2,
* "groupOperator":"NOT",
* "groups":[
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"product.manufacturer",
* "operator":"EQUALS",
* "value":"test manufacturer"},
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"product.model",
* "operator":"EQUALS",
* "value":"test model"
* }]
* }]
*/
public void testItemQualificationMVEL() throws MVELTranslationException {
DataDTOToMVELTranslator translator = new DataDTOToMVELTranslator();
DataDTO d1 = new DataDTO();
d1.setQuantity(1);
d1.setGroupOperator(BLCOperator.AND.name());
ExpressionDTO d1e1 = new ExpressionDTO();
d1e1.setName("category.name");
d1e1.setOperator(BLCOperator.EQUALS.name());
d1e1.setValue("test category");
d1.getGroups().add(d1e1);
String d1Translated = translator.createMVEL("discreteOrderItem", d1, orderItemFieldService);
String d1Mvel = "discreteOrderItem.?category.?name==\"test category\"";
assert(d1Mvel.equals(d1Translated));
DataDTO d2 = new DataDTO();
d2.setQuantity(2);
d2.setGroupOperator(BLCOperator.NOT.name());
ExpressionDTO d2e1 = new ExpressionDTO();
d2e1.setName("product.manufacturer");
d2e1.setOperator(BLCOperator.EQUALS.name());
d2e1.setValue("test manufacturer");
ExpressionDTO d2e2 = new ExpressionDTO();
d2e2.setName("product.model");
d2e2.setOperator(BLCOperator.EQUALS.name());
d2e2.setValue("test model");
d2.getGroups().add(d2e1);
d2.getGroups().add(d2e2);
String d2Translated = translator.createMVEL("discreteOrderItem", d2, orderItemFieldService);
String d2Mvel = "!(discreteOrderItem.?product.?manufacturer==\"test manufacturer\"&&discreteOrderItem.?product.?model==\"test model\")";
assert (d2Mvel.equals(d2Translated));
}
/**
* Tests the creation of a Fulfillment Group Qualification MVEL expression from a DataDTO
* @throws MVELTranslationException
*
* [{"id":null,
* "quantity":null,
* "groupOperator":"AND",
* "groups":[
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"address.state.name",
* "operator":"EQUALS",
* "value":"Texas"},
* {"id":null,
* "quantity":null,
* "groupOperator":null,
* "groups":null,
* "name":"retailShippingPrice",
* "operator":"BETWEEN_INCLUSIVE",
* "start":"99",
* "end":"199"}]
* }]
*/
public void testFulfillmentQualificationMVEL() throws MVELTranslationException {
DataDTOToMVELTranslator translator = new DataDTOToMVELTranslator();
DataDTO dataDTO = new DataDTO();
dataDTO.setGroupOperator(BLCOperator.AND.name());
ExpressionDTO e1 = new ExpressionDTO();
e1.setName("address.state.name");
e1.setOperator(BLCOperator.EQUALS.name());
e1.setValue("Texas");
ExpressionDTO e2 = new ExpressionDTO();
e2.setName("retailFulfillmentPrice");
e2.setOperator(BLCOperator.BETWEEN_INCLUSIVE.name());
e2.setStart("99");
e2.setEnd("199");
dataDTO.getGroups().add(e1);
dataDTO.getGroups().add(e2);
String translated = translator.createMVEL("fulfillmentGroup", dataDTO, fulfillmentGroupFieldService);
String mvel = "fulfillmentGroup.?address.?state.?name==\"Texas\"&&(fulfillmentGroup.?retailFulfillmentPrice.getAmount()>=99&&fulfillmentGroup.?retailFulfillmentPrice.getAmount()<=199)";
assert (mvel.equals(translated));
}
}
| 0true
|
admin_broadleaf-admin-module_src_test_java_org_broadleafcommerce_admin_web_rulebuilder_DataDTOToMVELTranslatorTest.java
|
497 |
public interface Catalog extends Serializable {
Long getId();
void setId(Long id);
String getName();
void setName(String name);
List<Site> getSites();
void setSites(List<Site> sites);
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_site_domain_Catalog.java
|
74 |
public abstract class CallableClientRequest extends ClientRequest implements Callable {
@Override
final void process() throws Exception {
ClientEndpoint endpoint = getEndpoint();
try {
Object result = call();
endpoint.sendResponse(result, getCallId());
} catch (Exception e) {
clientEngine.getLogger(getClass()).warning(e);
endpoint.sendResponse(e, getCallId());
}
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_CallableClientRequest.java
|
387 |
new Thread() {
public void run() {
if (mm.tryLock(key) == false) {
tryLockFailed.countDown();
}
}
}.start();
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_multimap_ClientMultiMapLockTest.java
|
125 |
public class TestDirectMappedLogBuffer
{
class FileChannelWithChoppyDisk extends StoreFileChannel
{
ByteBuffer buff = ByteBuffer.allocate(1024);
private int chunkSize;
public FileChannelWithChoppyDisk(int writeThisMuchAtATime)
{
super( (FileChannel) null );
this.chunkSize = writeThisMuchAtATime;
}
@Override
public int write( ByteBuffer byteBuffer, long l ) throws IOException
{
int bytesToWrite = Math.min( chunkSize, byteBuffer.remaining() );
buff.position( (int)l );
// Remember original limit
int originalLimit = byteBuffer.limit();
// Set limit to not be bigger than chunk size
byteBuffer.limit(byteBuffer.position() + bytesToWrite);
// Write
buff.put( byteBuffer );
// Restore limit
byteBuffer.limit(originalLimit);
return bytesToWrite;
}
@Override
public long position() throws IOException
{
return buff.position();
}
@Override
public StoreFileChannel position( long l ) throws IOException
{
buff.position( (int) l );
return this;
}
@Override
public long size() throws IOException
{
return buff.capacity();
}
@Override
public StoreFileChannel truncate( long l ) throws IOException
{
throw new UnsupportedOperationException();
}
@Override
public void force( boolean b ) throws IOException { }
}
@Test
public void shouldHandleDiskThatWritesOnlyTwoBytesAtATime() throws Exception
{
// Given
FileChannelWithChoppyDisk mockChannel = new FileChannelWithChoppyDisk(/* that writes */2/* bytes at a time */);
LogBuffer writeBuffer = new DirectMappedLogBuffer( mockChannel, new Monitors().newMonitor( ByteCounterMonitor.class ) );
// When
writeBuffer.put( new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} );
writeBuffer.writeOut();
// Then
assertThat(mockChannel.buff.position( ), is(16));
}
@Test(expected = IOException.class)
public void shouldFailIfUnableToWriteASingleByte() throws Exception
{
// Given
FileChannelWithChoppyDisk mockChannel = new FileChannelWithChoppyDisk(/* that writes */0/* bytes at a time */);
LogBuffer writeBuffer = new DirectMappedLogBuffer( mockChannel, new Monitors().newMonitor( ByteCounterMonitor.class ) );
// When
writeBuffer.put( new byte[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} );
writeBuffer.writeOut();
// Then expect an IOException
}
@Test
@Ignore("This test demonstrates a way in which DirectMappedLogBuffer can fail. In particular, using DMLB after an" +
"IOException can cause corruption in the underlying file channel. However, it is wrong to use DMLB after" +
"such an error anyway, so this not something requiring fixing.")
public void logBufferWritesContentsTwiceOnFailure() throws Exception
{
/*
* The guard will throw an exception before writing the fifth byte. We will catch that and try to continue
* writing. If that operation leads to writing to position 0 again then this is obviously an error (as we
* will be overwriting the stuff we wrote before the exception) and so we must fail.
*/
final AtomicBoolean broken = new AtomicBoolean( false );
FileSystemGuard guard = new FileSystemGuard()
{
@Override
public void checkOperation( OperationType operationType, File onFile, int bytesWrittenTotal,
int bytesWrittenThisCall, long channelPosition ) throws IOException
{
if ( !broken.get() && bytesWrittenTotal == 4 )
{
broken.set( true );
throw new IOException( "IOException after which this buffer should not be used" );
}
if ( broken.get() && channelPosition == 0 )
{
throw new IOException( "This exception should never happen" );
}
}
};
BreakableFileSystemAbstraction fs = new BreakableFileSystemAbstraction( new EphemeralFileSystemAbstraction(), guard );
DirectMappedLogBuffer buffer = new DirectMappedLogBuffer( fs.create( new File( "log" ) ), new Monitors().newMonitor( ByteCounterMonitor.class ) );
buffer.putInt( 1 ).putInt( 2 ).putInt( 3 );
try
{
buffer.writeOut();
}
catch ( IOException e )
{
e.printStackTrace();
}
buffer.writeOut();
}
@Test
public void testMonitoringBytesWritten() throws Exception
{
Monitors monitors = new Monitors();
ByteCounterMonitor monitor = monitors.newMonitor( ByteCounterMonitor.class );
DirectMappedLogBuffer buffer = new DirectMappedLogBuffer( new FileChannelWithChoppyDisk( 100 ), monitor );
final AtomicLong bytesWritten = new AtomicLong();
monitors.addMonitorListener( new ByteCounterMonitor()
{
@Override
public void bytesWritten( long numberOfBytes )
{
bytesWritten.addAndGet( numberOfBytes );
}
@Override
public void bytesRead( long numberOfBytes )
{
}
} );
buffer.put( (byte) 1 );
assertEquals( 0, bytesWritten.get() );
buffer.force();
assertEquals( 1, bytesWritten.get() );
buffer.putShort( (short) 1 );
assertEquals( 1, bytesWritten.get() );
buffer.force();
assertEquals( 3, bytesWritten.get() );
buffer.putInt( 1 );
assertEquals( 3, bytesWritten.get() );
buffer.force();
assertEquals( 7, bytesWritten.get() );
buffer.putLong( 1 );
assertEquals( 7, bytesWritten.get() );
buffer.force();
assertEquals( 15, bytesWritten.get() );
buffer.putFloat( 1 );
assertEquals( 15, bytesWritten.get() );
buffer.force();
assertEquals( 19, bytesWritten.get() );
buffer.putDouble( 1 );
assertEquals( 19, bytesWritten.get() );
buffer.force();
assertEquals( 27, bytesWritten.get() );
buffer.put( new byte[]{ 1, 2, 3 } );
assertEquals( 27, bytesWritten.get() );
buffer.force();
assertEquals( 30, bytesWritten.get() );
buffer.put( new char[] { '1', '2', '3'} );
assertEquals( 30, bytesWritten.get() );
buffer.force();
assertEquals( 36, bytesWritten.get() );
buffer.force();
assertEquals( 36, bytesWritten.get() );
}
}
| 0true
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_TestDirectMappedLogBuffer.java
|
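The choppy-disk channel above accepts only a few bytes per call; a hedged sketch (plain NIO, not the Neo4j buffer internals) of the drain loop a log buffer needs on top of such a channel, where a zero-byte write surfaces as the IOException the second test expects:
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

static void writeFully(FileChannel channel, ByteBuffer buf, long pos) throws IOException {
    while (buf.hasRemaining()) {
        final int written = channel.write(buf, pos); // may write less than remaining()
        if (written <= 0) {
            throw new IOException("Unable to write to disk at position " + pos);
        }
        pos += written;
    }
}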
684 |
constructors[COLLECTION_SIZE] = new ConstructorFunction<Integer, IdentifiedDataSerializable>() {
public IdentifiedDataSerializable createNew(Integer arg) {
return new CollectionSizeOperation();
}
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionDataSerializerHook.java
|
201 |
public final class ExtendedMemoryIndex extends MemoryIndex {
public ExtendedMemoryIndex(boolean storeOffsets, long maxReusedBytes) {
super(storeOffsets, maxReusedBytes);
}
}
| 0true
|
src_main_java_org_apache_lucene_index_memory_ExtendedMemoryIndex.java
|
81 |
class ChangeReferenceProposal extends CorrectionProposal
implements ICompletionProposalExtension {
private ChangeReferenceProposal(ProblemLocation problem,
String name, String pkg, TextFileChange change) {
super("Change reference to '" + name + "'" + pkg, change,
new Region(problem.getOffset(), name.length()),
MINOR_CHANGE);
}
static void addChangeReferenceProposal(ProblemLocation problem,
Collection<ICompletionProposal> proposals, IFile file,
String brokenName, DeclarationWithProximity dwp, int dist,
Tree.CompilationUnit cu) {
TextFileChange change =
new TextFileChange("Change Reference", file);
change.setEdit(new MultiTextEdit());
IDocument doc = EditorUtil.getDocument(change);
Declaration dec = dwp.getDeclaration();
String pkg = "";
if (dec.isToplevel() &&
!isImported(dec, cu) &&
isInPackage(cu, dec)) {
String pn = dec.getContainer().getQualifiedNameString();
pkg = " in '" + pn + "'";
if (!pn.isEmpty() &&
!pn.equals(Module.LANGUAGE_MODULE_NAME)) {
OccurrenceLocation ol =
getOccurrenceLocation(cu,
Nodes.findNode(cu, problem.getOffset()),
problem.getOffset());
if (ol!=IMPORT) {
List<InsertEdit> ies =
importEdits(cu, singleton(dec),
null, null, doc);
for (InsertEdit ie: ies) {
change.addEdit(ie);
}
}
}
}
change.addEdit(new ReplaceEdit(problem.getOffset(),
brokenName.length(), dwp.getName())); //Note: don't use problem.getLength() because it's wrong from the problem list
proposals.add(new ChangeReferenceProposal(problem,
dwp.getName(), pkg, change));
}
protected static boolean isInPackage(Tree.CompilationUnit cu,
Declaration dec) {
return !dec.getUnit().getPackage()
.equals(cu.getUnit().getPackage());
}
@Override
public void apply(IDocument document, char trigger, int offset) {
apply(document);
}
@Override
public boolean isValidFor(IDocument document, int offset) {
return true;
}
@Override
public char[] getTriggerCharacters() {
return "r".toCharArray();
}
@Override
public int getContextInformationPosition() {
return -1;
}
static void addChangeReferenceProposals(Tree.CompilationUnit cu,
Node node, ProblemLocation problem,
Collection<ICompletionProposal> proposals, IFile file) {
String brokenName = Nodes.getIdentifyingNode(node).getText();
if (brokenName.isEmpty()) return;
for (DeclarationWithProximity dwp:
getProposals(node, node.getScope(), cu).values()) {
if (isUpperCase(dwp.getName().charAt(0))==isUpperCase(brokenName.charAt(0))) {
int dist = getLevenshteinDistance(brokenName, dwp.getName()); //+dwp.getProximity()/3;
//TODO: would it be better to just sort by dist, and
// then select the 3 closest possibilities?
if (dist<=brokenName.length()/3+1) {
addChangeReferenceProposal(problem, proposals, file,
brokenName, dwp, dist, cu);
}
}
}
}
@Override
public StyledString getStyledDisplayString() {
return Highlights.styleProposal(getDisplayString(), true);
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_ChangeReferenceProposal.java
|
4,034 |
public class MultiMatchQuery extends MatchQuery {
private boolean useDisMax = true;
private float tieBreaker;
public void setUseDisMax(boolean useDisMax) {
this.useDisMax = useDisMax;
}
public void setTieBreaker(float tieBreaker) {
this.tieBreaker = tieBreaker;
}
public MultiMatchQuery(QueryParseContext parseContext) {
super(parseContext);
}
private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException {
Query query = parse(type, fieldName, value);
if (query instanceof BooleanQuery) {
Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
}
if (boostValue != null && query != null) {
query.setBoost(boostValue);
}
return query;
}
public Query parse(Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException {
if (fieldNames.size() == 1) {
Map.Entry<String, Float> fieldBoost = fieldNames.entrySet().iterator().next();
Float boostValue = fieldBoost.getValue();
return parseAndApply(type, fieldBoost.getKey(), value, minimumShouldMatch, boostValue);
}
if (useDisMax) {
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(tieBreaker);
boolean clauseAdded = false;
for (String fieldName : fieldNames.keySet()) {
Float boostValue = fieldNames.get(fieldName);
Query query = parseAndApply(type, fieldName, value, minimumShouldMatch, boostValue);
if (query != null) {
clauseAdded = true;
disMaxQuery.add(query);
}
}
return clauseAdded ? disMaxQuery : null;
} else {
BooleanQuery booleanQuery = new BooleanQuery();
for (String fieldName : fieldNames.keySet()) {
Float boostValue = fieldNames.get(fieldName);
Query query = parseAndApply(type, fieldName, value, minimumShouldMatch, boostValue);
if (query != null) {
booleanQuery.add(query, BooleanClause.Occur.SHOULD);
}
}
return !booleanQuery.clauses().isEmpty() ? booleanQuery : null;
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_index_search_MultiMatchQuery.java
|
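Worked numbers for the two combination modes above, using Lucene's DisjunctionMaxQuery semantics (the per-field scores are invented for illustration):
// Per-field scores {1.2, 0.4, 0.1}, tieBreaker = 0.3:
double tieBreaker = 0.3;
double disMax = 1.2 + tieBreaker * (0.4 + 0.1); // useDisMax=true: best field + tieBreaker * the rest = 1.35
double bool = 1.2 + 0.4 + 0.1;                  // useDisMax=false: plain sum over SHOULD clauses = 1.70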
575 |
private static class InitialMembershipListenerImpl implements InitialMembershipListener {
private List<EventObject> events = Collections.synchronizedList(new LinkedList<EventObject>());
public void init(InitialMembershipEvent e) {
events.add(e);
}
public void memberAdded(MembershipEvent e) {
events.add(e);
}
public void memberRemoved(MembershipEvent e) {
events.add(e);
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
public void assertEventCount(int expected) {
assertEquals(expected, events.size());
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_cluster_ClusterMembershipTest.java
|
1,210 |
SOFT_THREAD_LOCAL {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
return threadLocal(softFactory(dequeFactory(c, limit)));
}
},
| 0true
|
src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java
|
651 |
public class CategoryDaoDataProvider {
@DataProvider(name = "basicCategory")
public static Object[][] provideBasicCategory() {
Category category = new CategoryImpl();
category.setName("Yuban");
category.setDescription("Yuban");
category.setId(1001L);
return new Object[][] { { category } };
}
}
| 0true
|
integration_src_test_java_org_broadleafcommerce_core_catalog_CategoryDaoDataProvider.java
|
543 |
public enum HOOK_POSITION {
FIRST, EARLY, REGULAR, LATE, LAST
}
| 0true
|
core_src_main_java_com_orientechnologies_orient_core_hook_ORecordHook.java
|
1,190 |
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SE_BAD_FIELD")
public class MemberLeftException extends ExecutionException implements DataSerializable, RetryableException {
private Member member;
public MemberLeftException() {
}
public MemberLeftException(Member member) {
this.member = member;
}
/**
* Returns the member which left the cluster
* @return member
*/
public Member getMember() {
return member;
}
public String getMessage() {
return member + " has left cluster!";
}
public void writeData(ObjectDataOutput out) throws IOException {
member.writeData(out);
}
public void readData(ObjectDataInput in) throws IOException {
member = new MemberImpl();
member.readData(in);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_core_MemberLeftException.java
|
643 |
public abstract class CollectionBackupAwareOperation extends CollectionOperation implements BackupAwareOperation {
protected CollectionBackupAwareOperation() {
}
protected CollectionBackupAwareOperation(String name) {
super(name);
}
@Override
public int getSyncBackupCount() {
return getOrCreateContainer().getConfig().getBackupCount();
}
@Override
public int getAsyncBackupCount() {
return getOrCreateContainer().getConfig().getAsyncBackupCount();
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionBackupAwareOperation.java
|
1,522 |
public class ValueGroupCountMapReduce {
public static final String PROPERTY = Tokens.makeNamespace(ValueGroupCountMapReduce.class) + ".property";
public static final String CLASS = Tokens.makeNamespace(ValueGroupCountMapReduce.class) + ".class";
public static final String TYPE = Tokens.makeNamespace(ValueGroupCountMapReduce.class) + ".type";
public enum Counters {
PROPERTIES_COUNTED
}
public static Configuration createConfiguration(final Class<? extends Element> klass, final String key, final Class<? extends Writable> type) {
final Configuration configuration = new EmptyConfiguration();
configuration.setClass(CLASS, klass, Element.class);
configuration.set(PROPERTY, key);
configuration.setClass(TYPE, type, Writable.class);
return configuration;
}
public static class Map extends Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable> {
private String property;
private WritableHandler handler;
private boolean isVertex;
// making use of in-map aggregation/combiner
private CounterMap<Object> map;
private int mapSpillOver;
private SafeMapperOutputs outputs;
@Override
public void setup(final Mapper.Context context) throws IOException, InterruptedException {
this.map = new CounterMap<Object>();
this.mapSpillOver = context.getConfiguration().getInt(Tokens.TITAN_HADOOP_PIPELINE_MAP_SPILL_OVER, Tokens.DEFAULT_MAP_SPILL_OVER);
this.property = context.getConfiguration().get(PROPERTY);
this.isVertex = context.getConfiguration().getClass(CLASS, Element.class, Element.class).equals(Vertex.class);
this.handler = new WritableHandler(context.getConfiguration().getClass(TYPE, Text.class, WritableComparable.class));
this.outputs = new SafeMapperOutputs(context);
}
@Override
public void map(final NullWritable key, final FaunusVertex value, final Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
if (this.isVertex) {
if (value.hasPaths()) {
this.map.incr(ElementPicker.getProperty(value, this.property), value.pathCount());
DEFAULT_COMPAT.incrementContextCounter(context, Counters.PROPERTIES_COUNTED, 1L);
}
} else {
for (final Edge e : value.getEdges(Direction.OUT)) {
final StandardFaunusEdge edge = (StandardFaunusEdge) e;
if (edge.hasPaths()) {
this.map.incr(ElementPicker.getProperty(edge, this.property), edge.pathCount());
DEFAULT_COMPAT.incrementContextCounter(context, Counters.PROPERTIES_COUNTED, 1L);
}
}
}
// protect against memory explosion
if (this.map.size() > this.mapSpillOver) {
this.dischargeMap(context);
}
this.outputs.write(Tokens.GRAPH, NullWritable.get(), value);
}
private final LongWritable longWritable = new LongWritable();
public void dischargeMap(final Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
for (final java.util.Map.Entry<Object, Long> entry : this.map.entrySet()) {
this.longWritable.set(entry.getValue());
context.write(this.handler.set(entry.getKey()), this.longWritable);
}
this.map.clear();
}
@Override
public void cleanup(final Mapper<NullWritable, FaunusVertex, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
this.dischargeMap(context);
this.outputs.close();
}
}
public static class Combiner extends Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable> {
private final LongWritable longWritable = new LongWritable();
@Override
public void reduce(final WritableComparable key, final Iterable<LongWritable> values, final Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable token : values) {
totalCount = totalCount + token.get();
}
this.longWritable.set(totalCount);
context.write(key, this.longWritable);
}
}
public static class Reduce extends Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable> {
private SafeReducerOutputs outputs;
@Override
public void setup(final Reducer.Context context) throws IOException, InterruptedException {
this.outputs = new SafeReducerOutputs(context);
}
private final LongWritable longWritable = new LongWritable();
@Override
public void reduce(final WritableComparable key, final Iterable<LongWritable> values, final Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
long totalCount = 0;
for (final LongWritable token : values) {
totalCount = totalCount + token.get();
}
this.longWritable.set(totalCount);
this.outputs.write(Tokens.SIDEEFFECT, key, this.longWritable);
}
@Override
public void cleanup(final Reducer<WritableComparable, LongWritable, WritableComparable, LongWritable>.Context context) throws IOException, InterruptedException {
this.outputs.close();
}
}
}
| 1no label
|
titan-hadoop-parent_titan-hadoop-core_src_main_java_com_thinkaurelius_titan_hadoop_mapreduce_sideeffect_ValueGroupCountMapReduce.java
|
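The map side above buffers counts in a CounterMap and flushes once it grows past the spill-over threshold; a hedged, Hadoop-free sketch of that in-mapper combining pattern (all names illustrative):
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;

class InMapperCombiner<K> {
    private final Map<K, Long> counts = new HashMap<>();
    private final int spillOver;

    InMapperCombiner(int spillOver) { this.spillOver = spillOver; }

    void incr(K key, long delta, BiConsumer<K, Long> emit) {
        counts.merge(key, delta, Long::sum);
        if (counts.size() > spillOver) { // bound memory, like dischargeMap above
            flush(emit);
        }
    }

    void flush(BiConsumer<K, Long> emit) { // also called at cleanup, like the mapper above
        counts.forEach(emit);
        counts.clear();
    }
}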
114 |
public class OLogManager {
private boolean debug = true;
private boolean info = true;
private boolean warn = true;
private boolean error = true;
private Level minimumLevel = Level.SEVERE;
private static final String DEFAULT_LOG = "com.orientechnologies";
private static final OLogManager instance = new OLogManager();
protected OLogManager() {
}
public void setConsoleLevel(final String iLevel) {
setLevel(iLevel, ConsoleHandler.class);
}
public void setFileLevel(final String iLevel) {
setLevel(iLevel, FileHandler.class);
}
public void log(final Object iRequester, final Level iLevel, String iMessage, final Throwable iException,
final Object... iAdditionalArgs) {
if (iMessage != null) {
final Logger log = iRequester != null ? Logger.getLogger(iRequester.getClass().getName()) : Logger.getLogger(DEFAULT_LOG);
if (log == null) {
// USE SYSERR
try {
System.err.println(String.format(iMessage, iAdditionalArgs));
} catch (Exception e) {
OLogManager.instance().warn(this, "Error on formatting message", e);
}
} else if (log.isLoggable(iLevel)) {
// USE THE LOG
try {
final String msg = String.format(iMessage, iAdditionalArgs);
if (iException != null)
log.log(iLevel, msg, iException);
else
log.log(iLevel, msg);
} catch (Exception e) {
OLogManager.instance().warn(this, "Error on formatting message", e);
}
}
}
}
public void debug(final Object iRequester, final String iMessage, final Object... iAdditionalArgs) {
if (isDebugEnabled())
log(iRequester, Level.FINE, iMessage, null, iAdditionalArgs);
}
public void debug(final Object iRequester, final String iMessage, final Throwable iException, final Object... iAdditionalArgs) {
if (isDebugEnabled())
log(iRequester, Level.FINE, iMessage, iException, iAdditionalArgs);
}
public void debug(final Object iRequester, final String iMessage, final Throwable iException,
final Class<? extends OException> iExceptionClass, final Object... iAdditionalArgs) {
debug(iRequester, iMessage, iException, iAdditionalArgs);
if (iExceptionClass != null)
try {
throw iExceptionClass.getConstructor(String.class, Throwable.class).newInstance(iMessage, iException);
} catch (NoSuchMethodException e) {
} catch (IllegalArgumentException e) {
} catch (SecurityException e) {
} catch (InstantiationException e) {
} catch (IllegalAccessException e) {
} catch (InvocationTargetException e) {
}
}
public void info(final Object iRequester, final String iMessage, final Object... iAdditionalArgs) {
if (isInfoEnabled())
log(iRequester, Level.INFO, iMessage, null, iAdditionalArgs);
}
public void info(final Object iRequester, final String iMessage, final Throwable iException, final Object... iAdditionalArgs) {
if (isInfoEnabled())
log(iRequester, Level.INFO, iMessage, iException, iAdditionalArgs);
}
public void warn(final Object iRequester, final String iMessage, final Object... iAdditionalArgs) {
if (isWarnEnabled())
log(iRequester, Level.WARNING, iMessage, null, iAdditionalArgs);
}
public void warn(final Object iRequester, final String iMessage, final Throwable iException, final Object... iAdditionalArgs) {
if (isWarnEnabled())
log(iRequester, Level.WARNING, iMessage, iException, iAdditionalArgs);
}
public void config(final Object iRequester, final String iMessage, final Object... iAdditionalArgs) {
log(iRequester, Level.CONFIG, iMessage, null, iAdditionalArgs);
}
public void error(final Object iRequester, final String iMessage, final Object... iAdditionalArgs) {
log(iRequester, Level.SEVERE, iMessage, null, iAdditionalArgs);
}
public void error(final Object iRequester, final String iMessage, final Throwable iException, final Object... iAdditionalArgs) {
if (isErrorEnabled())
log(iRequester, Level.SEVERE, iMessage, iException, iAdditionalArgs);
}
public void error(final Object iRequester, final String iMessage, final Throwable iException,
final Class<? extends OException> iExceptionClass, final Object... iAdditionalArgs) {
error(iRequester, iMessage, iException, iAdditionalArgs);
final String msg = String.format(iMessage, iAdditionalArgs);
if (iExceptionClass != null)
try {
throw iExceptionClass.getConstructor(String.class, Throwable.class).newInstance(msg, iException);
} catch (NoSuchMethodException e) {
} catch (IllegalArgumentException e) {
} catch (SecurityException e) {
} catch (InstantiationException e) {
} catch (IllegalAccessException e) {
} catch (InvocationTargetException e) {
}
}
public void error(final Object iRequester, final String iMessage, final Class<? extends OException> iExceptionClass) {
error(iRequester, iMessage, (Throwable) null);
try {
throw iExceptionClass.getConstructor(String.class).newInstance(iMessage);
} catch (IllegalArgumentException e) {
} catch (SecurityException e) {
} catch (InstantiationException e) {
} catch (IllegalAccessException e) {
} catch (InvocationTargetException e) {
} catch (NoSuchMethodException e) {
}
}
@SuppressWarnings("unchecked")
public void exception(final String iMessage, final Exception iNestedException, final Class<? extends OException> iExceptionClass,
final Object... iAdditionalArgs) throws OException {
if (iMessage == null)
return;
// FORMAT THE MESSAGE
String msg = String.format(iMessage, iAdditionalArgs);
Constructor<OException> c;
OException exceptionToThrow = null;
try {
if (iNestedException != null) {
c = (Constructor<OException>) iExceptionClass.getConstructor(String.class, Throwable.class);
exceptionToThrow = c.newInstance(msg, iNestedException);
}
} catch (Exception e) {
// ignored: fall back to the single-argument constructor below
}
if (exceptionToThrow == null)
try {
c = (Constructor<OException>) iExceptionClass.getConstructor(String.class);
exceptionToThrow = c.newInstance(msg);
} catch (SecurityException e1) {
} catch (NoSuchMethodException e1) {
} catch (IllegalArgumentException e1) {
} catch (InstantiationException e1) {
} catch (IllegalAccessException e1) {
} catch (InvocationTargetException e1) {
}
if (exceptionToThrow != null)
throw exceptionToThrow;
else
throw new IllegalArgumentException("Cannot create the exception of type: " + iExceptionClass);
}
public boolean isWarn() {
return warn;
}
public void setWarnEnabled(boolean warn) {
this.warn = warn;
}
public void setInfoEnabled(boolean info) {
this.info = info;
}
public void setDebugEnabled(boolean debug) {
this.debug = debug;
}
public void setErrorEnabled(boolean error) {
this.error = error;
}
public boolean isLevelEnabled(final Level level) {
if (level.equals(Level.FINER) || level.equals(Level.FINE) || level.equals(Level.FINEST))
return debug;
else if (level.equals(Level.INFO))
return info;
else if (level.equals(Level.WARNING))
return warn;
else if (level.equals(Level.SEVERE))
return error;
return false;
}
public boolean isDebugEnabled() {
return debug;
}
public boolean isInfoEnabled() {
return info;
}
public boolean isWarnEnabled() {
return warn;
}
public boolean isErrorEnabled() {
return error;
}
public static OLogManager instance() {
return instance;
}
public Level setLevel(final String iLevel, final Class<? extends Handler> iHandler) {
final Level level = iLevel != null ? Level.parse(iLevel.toUpperCase(Locale.ENGLISH)) : Level.INFO;
if (level.intValue() < minimumLevel.intValue()) {
// UPDATE MINIMUM LEVEL
minimumLevel = level;
if (level.equals(Level.FINER) || level.equals(Level.FINE) || level.equals(Level.FINEST))
debug = info = warn = error = true;
else if (level.equals(Level.INFO)) {
info = warn = error = true;
debug = false;
} else if (level.equals(Level.WARNING)) {
warn = error = true;
debug = info = false;
} else if (level.equals(Level.SEVERE)) {
error = true;
debug = info = warn = false;
}
}
Logger log = Logger.getLogger(DEFAULT_LOG);
for (Handler h : log.getHandlers()) {
if (h.getClass().isAssignableFrom(iHandler)) {
h.setLevel(level);
break;
}
}
return level;
}
public static void installCustomFormatter() {
try {
// ENSURE THE ORIENT LOG FORMATTER IS INSTALLED ON THE CONSOLE EVEN IF NO CONFIGURATION FILE IS PROVIDED
final Logger log = Logger.getLogger("");
if (log.getHandlers().length == 0) {
// SET DEFAULT LOG FORMATTER
final Handler h = new ConsoleHandler();
h.setFormatter(new OLogFormatter());
log.addHandler(h);
} else {
for (Handler h : log.getHandlers()) {
if (h instanceof ConsoleHandler && !h.getFormatter().getClass().equals(OLogFormatter.class))
h.setFormatter(new OLogFormatter());
}
}
} catch (Exception e) {
System.err.println("Error while installing custom formatter. Logging could be disabled. Cause: " + e.toString());
}
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_log_OLogManager.java
|
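A short usage sketch of the logger above; message strings are java.util.Formatter templates because log() routes them through String.format (the arguments here are invented):
OLogManager.instance().info(this, "Loaded %d records from %s", 42, "users.pcl");
OLogManager.instance().error(this, "Cannot open database '%s'", new IOException("disk full"), "demo");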
503 |
public interface SiteService {
/**
* Find a site by its id.
* @param id
* @return
*/
public Site retrieveSiteById(Long id);
/**
* Find a site by its domain name.
* @param domain
* @return
*/
public Site retrieveSiteByDomainName(String domain);
/**
* Save updates to a site.
* @param site
* @return
*/
public Site save(Site site);
/**
* Returns the default site.
*
* @see SiteDaoImpl
*
* @return
*/
public Site retrieveDefaultSite();
/**
* @return a List of all sites in the system
*/
public List<Site> findAllActiveSites();
}
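A hedged usage sketch for the contract above — resolve the request's site by domain and fall back to the default; the caller shape and the null-on-miss behavior are assumptions, not guaranteed by the interface:

// Hypothetical caller, e.g. a filter resolving the current storefront site.
// 'siteService' is assumed to be an injected SiteService bean, and
// retrieveSiteByDomainName is assumed to return null for unknown domains.
public Site resolveSite(SiteService siteService, String domain) {
    Site site = siteService.retrieveSiteByDomainName(domain);
    if (site == null) {
        site = siteService.retrieveDefaultSite();
    }
    return site;
}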
| 0true
|
common_src_main_java_org_broadleafcommerce_common_site_service_SiteService.java
|
268 |
public class GetMemberUuidTask implements Callable<String>, DataSerializable, HazelcastInstanceAware {
private HazelcastInstance node;
public String call() throws Exception {
return node.getCluster().getLocalMember().getUuid();
}
public void writeData(ObjectDataOutput out) throws IOException {
}
public void readData(ObjectDataInput in) throws IOException {
}
@Override
public void setHazelcastInstance(HazelcastInstance hazelcastInstance) {
node = hazelcastInstance;
}
}
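A usage sketch: submitted through the client's distributed executor, the task runs on the chosen member and returns that member's own UUID. The executor name and the assumption of a reachable cluster with default client settings are illustrative:

import com.hazelcast.client.HazelcastClient;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IExecutorService;
import com.hazelcast.core.Member;
import java.util.concurrent.Future;

public class GetMemberUuidDemo {
    public static void main(String[] args) throws Exception {
        // assumes a running cluster reachable with default client settings
        HazelcastInstance client = HazelcastClient.newHazelcastClient();
        IExecutorService executor = client.getExecutorService("default");
        Member member = client.getCluster().getMembers().iterator().next();
        // the task executes on 'member' and reports that member's own UUID
        Future<String> uuid = executor.submitToMember(new GetMemberUuidTask(), member);
        System.out.println("target uuid: " + uuid.get());
        client.shutdown();
    }
}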
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_executor_tasks_GetMemberUuidTask.java
|
3,903 |
public class NestedFilterParser implements FilterParser {
public static final String NAME = "nested";
@Inject
public NestedFilterParser() {
}
@Override
public String[] names() {
return new String[]{NAME, Strings.toCamelCase(NAME)};
}
@Override
public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
XContentParser parser = parseContext.parser();
Query query = null;
boolean queryFound = false;
Filter filter = null;
boolean filterFound = false;
float boost = 1.0f;
boolean join = true;
String path = null;
boolean cache = false;
CacheKeyFilter.Key cacheKey = null;
String filterName = null;
// we need a late binding filter so we can inject a parent nested filter inner nested queries
NestedQueryParser.LateBindingParentFilter currentParentFilterContext = NestedQueryParser.parentFilterContext.get();
NestedQueryParser.LateBindingParentFilter usAsParentFilter = new NestedQueryParser.LateBindingParentFilter();
NestedQueryParser.parentFilterContext.set(usAsParentFilter);
try {
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
queryFound = true;
query = parseContext.parseInnerQuery();
} else if ("filter".equals(currentFieldName)) {
filterFound = true;
filter = parseContext.parseInnerFilter();
} else {
throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("join".equals(currentFieldName)) {
join = parser.booleanValue();
} else if ("path".equals(currentFieldName)) {
path = parser.text();
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
} else if ("_scope".equals(currentFieldName)) {
throw new QueryParsingException(parseContext.index(), "the [_scope] support in [nested] filter has been removed, use nested filter as a facet_filter in the relevant facet");
} else if ("_name".equals(currentFieldName)) {
filterName = parser.text();
} else if ("_cache".equals(currentFieldName)) {
cache = parser.booleanValue();
} else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
cacheKey = new CacheKeyFilter.Key(parser.text());
} else {
throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]");
}
}
}
if (!queryFound && !filterFound) {
throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field");
}
if (path == null) {
throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field");
}
if (query == null && filter == null) {
return null;
}
if (filter != null) {
query = new XConstantScoreQuery(filter);
}
query.setBoost(boost);
MapperService.SmartNameObjectMapper mapper = parseContext.smartObjectMapper(path);
if (mapper == null) {
throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
}
ObjectMapper objectMapper = mapper.mapper();
if (objectMapper == null) {
throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]");
}
if (!objectMapper.nested().isNested()) {
throw new QueryParsingException(parseContext.index(), "[nested] nested object under path [" + path + "] is not of nested type");
}
Filter childFilter = parseContext.cacheFilter(objectMapper.nestedTypeFilter(), null);
usAsParentFilter.filter = childFilter;
// wrap the child query to only work on the nested path type
query = new XFilteredQuery(query, childFilter);
Filter parentFilter = currentParentFilterContext;
if (parentFilter == null) {
parentFilter = NonNestedDocsFilter.INSTANCE;
// don't do special parent filtering, since we might have same nested mapping on two different types
//if (mapper.hasDocMapper()) {
// // filter based on the type...
// parentFilter = mapper.docMapper().typeFilter();
//}
parentFilter = parseContext.cacheFilter(parentFilter, null);
}
Filter nestedFilter;
if (join) {
ToParentBlockJoinQuery joinQuery = new ToParentBlockJoinQuery(query, parentFilter, ScoreMode.None);
nestedFilter = new QueryWrapperFilter(joinQuery);
} else {
nestedFilter = new QueryWrapperFilter(query);
}
if (cache) {
nestedFilter = parseContext.cacheFilter(nestedFilter, cacheKey);
}
if (filterName != null) {
parseContext.addNamedFilter(filterName, nestedFilter);
}
return nestedFilter;
} finally {
// restore the thread local one...
NestedQueryParser.parentFilterContext.set(currentParentFilterContext);
}
}
}
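For reference, the parser above consumes filter bodies of the following shape. This sketch builds one with XContentBuilder (per the Elasticsearch API of this era); "comments" and "comments.stars" are hypothetical names for the nested path and field:

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class NestedFilterBodyDemo {
    public static void main(String[] args) throws Exception {
        XContentBuilder body = XContentFactory.jsonBuilder()
                .startObject()
                  .startObject("nested")
                    .field("path", "comments")          // required: path to the nested-mapped object
                    .field("_cache", true)              // optional: cache the resulting filter
                    .startObject("filter")              // either 'query' or 'filter' is required
                      .startObject("term")
                        .field("comments.stars", 4)
                      .endObject()
                    .endObject()
                  .endObject()
                .endObject();
        System.out.println(body.string());
    }
}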
| 1no label
|
src_main_java_org_elasticsearch_index_query_NestedFilterParser.java
|
2,839 |
public final class PartitionStateGeneratorImpl implements PartitionStateGenerator {
private static final ILogger logger = Logger.getLogger(PartitionStateGenerator.class);
private static final float RANGE_CHECK_RATIO = 1.1f;
private static final int MAX_RETRY_COUNT = 3;
private static final int AGGRESSIVE_RETRY_THRESHOLD = 1;
private static final int AGGRESSIVE_INDEX_THRESHOLD = 3;
private static final int MIN_AVG_OWNER_DIFF = 3;
@Override
public Address[][] initialize(Collection<MemberGroup> memberGroups, int partitionCount) {
LinkedList<NodeGroup> nodeGroups = createNodeGroups(memberGroups);
if (nodeGroups.size() == 0) {
return null;
}
return arrange(nodeGroups, partitionCount, new EmptyStateInitializer());
}
@Override
public Address[][] reArrange(Collection<MemberGroup> memberGroups, InternalPartition[] currentState) {
LinkedList<NodeGroup> nodeGroups = createNodeGroups(memberGroups);
if (nodeGroups.size() == 0) {
return null;
}
return arrange(nodeGroups, currentState.length, new CopyStateInitializer(currentState));
}
private Address[][] arrange(LinkedList<NodeGroup> groups, int partitionCount,
StateInitializer stateInitializer) {
Address[][] state = new Address[partitionCount][];
stateInitializer.initialize(state);
TestResult result = null;
int tryCount = 0;
while (tryCount < MAX_RETRY_COUNT && result != TestResult.PASS) {
boolean aggressive = tryCount >= AGGRESSIVE_RETRY_THRESHOLD;
tryArrange(state, groups, partitionCount, aggressive);
result = testArrangement(state, groups, partitionCount);
if (result == TestResult.FAIL) {
logger.warning("Error detected on partition arrangement! Try-count: " + tryCount);
stateInitializer.initialize(state);
} else if (result == TestResult.RETRY) {
tryCount++;
if (logger.isFinestEnabled()) {
logger.finest("Re-trying partition arrangement.. Count: " + tryCount);
}
}
}
if (result == TestResult.FAIL) {
logger.severe("Failed to arrange partitions !!!");
}
return state;
}
private void tryArrange(Address[][] state, LinkedList<NodeGroup> groups,
int partitionCount, boolean aggressive) {
int groupSize = groups.size();
int replicaCount = Math.min(groupSize, InternalPartition.MAX_REPLICA_COUNT);
int avgPartitionPerGroup = partitionCount / groupSize;
// clear unused replica owners
// initialize partition registry for each group
initializeGroupPartitions(state, groups, replicaCount, aggressive);
for (int index = 0; index < replicaCount; index++) {
// partitions that are not bound to any node/group
LinkedList<Integer> freePartitions = getUnownedPartitions(state, index);
// groups having partitions under average
LinkedList<NodeGroup> underLoadedGroups = new LinkedList<NodeGroup>();
// groups having partitions over average
LinkedList<NodeGroup> overLoadedGroups = new LinkedList<NodeGroup>();
// number of groups that should have (average + 1) partitions
int plusOneGroupCount = partitionCount - avgPartitionPerGroup * groupSize;
// determine under-loaded and over-loaded groups
for (NodeGroup nodeGroup : groups) {
int size = nodeGroup.getPartitionCount(index);
if (size < avgPartitionPerGroup) {
underLoadedGroups.add(nodeGroup);
} else if (size > avgPartitionPerGroup) { // maxPartitionPerGroup ??
overLoadedGroups.add(nodeGroup);
}
}
// distribute free partitions among under-loaded groups
plusOneGroupCount = tryToDistributeUnownedPartitions(underLoadedGroups, freePartitions,
avgPartitionPerGroup, index, plusOneGroupCount);
if (!freePartitions.isEmpty()) {
// if there are still free partitions those could not be distributed
// to under-loaded groups then one-by-one distribute them among all groups
// until queue is empty.
distributeUnownedPartitions(groups, freePartitions, index);
}
// TODO: what if there are still free partitions?
// iterate through over-loaded groups' partitions and distribute them to under-loaded groups.
transferPartitionsBetweenGroups(underLoadedGroups, overLoadedGroups, index,
avgPartitionPerGroup, plusOneGroupCount);
// post process each group's partition table (distribute partitions added to group to nodes
// and balance load of partition ownerships in group) and save partition ownerships to
// cluster partition state table.
updatePartitionState(state, groups, index);
}
}
private void transferPartitionsBetweenGroups(Queue<NodeGroup> underLoadedGroups,
Collection<NodeGroup> overLoadedGroups,
int index, int avgPartitionPerGroup,
int plusOneGroupCount) {
int maxPartitionPerGroup = avgPartitionPerGroup + 1;
int maxTries = underLoadedGroups.size() * overLoadedGroups.size() * 10;
int tries = 0;
int expectedPartitionCount = plusOneGroupCount > 0 ? maxPartitionPerGroup : avgPartitionPerGroup;
while (tries++ < maxTries && !underLoadedGroups.isEmpty()) {
NodeGroup toGroup = underLoadedGroups.poll();
Iterator<NodeGroup> overLoadedGroupsIter = overLoadedGroups.iterator();
while (overLoadedGroupsIter.hasNext()) {
NodeGroup fromGroup = overLoadedGroupsIter.next();
Iterator<Integer> partitionsIter = fromGroup.getPartitionsIterator(index);
while (partitionsIter.hasNext()
&& fromGroup.getPartitionCount(index) > expectedPartitionCount
&& toGroup.getPartitionCount(index) < expectedPartitionCount) {
Integer partitionId = partitionsIter.next();
if (toGroup.addPartition(index, partitionId)) {
partitionsIter.remove();
}
}
int fromCount = fromGroup.getPartitionCount(index);
if (plusOneGroupCount > 0 && fromCount == maxPartitionPerGroup) {
if (--plusOneGroupCount == 0) {
expectedPartitionCount = avgPartitionPerGroup;
}
}
if (fromCount <= expectedPartitionCount) {
overLoadedGroupsIter.remove();
}
int toCount = toGroup.getPartitionCount(index);
if (plusOneGroupCount > 0 && toCount == maxPartitionPerGroup) {
if (--plusOneGroupCount == 0) {
expectedPartitionCount = avgPartitionPerGroup;
}
}
if (toCount >= expectedPartitionCount) {
break;
}
}
if (toGroup.getPartitionCount(index) < avgPartitionPerGroup/* && !underLoadedGroups.contains(toGroup)*/) {
underLoadedGroups.offer(toGroup);
}
}
}
private void updatePartitionState(Address[][] state, Collection<NodeGroup> groups, int index) {
for (NodeGroup group : groups) {
group.postProcessPartitionTable(index);
for (Address address : group.getNodes()) {
PartitionTable table = group.getPartitionTable(address);
Set<Integer> set = table.getPartitions(index);
for (Integer partitionId : set) {
state[partitionId][index] = address;
}
}
}
}
private void distributeUnownedPartitions(Queue<NodeGroup> groups, Queue<Integer> freePartitions, int index) {
int groupSize = groups.size();
int maxTries = freePartitions.size() * groupSize * 10;
int tries = 0;
Integer partitionId = freePartitions.poll();
while (partitionId != null && tries++ < maxTries) {
NodeGroup group = groups.poll();
if (group.addPartition(index, partitionId)) {
partitionId = freePartitions.poll();
}
groups.offer(group);
}
}
private int tryToDistributeUnownedPartitions(Queue<NodeGroup> underLoadedGroups, Queue<Integer> freePartitions,
int avgPartitionPerGroup, int index, int plusOneGroupCount) {
// distribute free partitions among under-loaded groups
int maxPartitionPerGroup = avgPartitionPerGroup + 1;
int maxTries = freePartitions.size() * underLoadedGroups.size();
int tries = 0;
while (tries++ < maxTries && !freePartitions.isEmpty() && !underLoadedGroups.isEmpty()) {
NodeGroup group = underLoadedGroups.poll();
int size = freePartitions.size();
for (int i = 0; i < size; i++) {
Integer partitionId = freePartitions.poll();
if (!group.addPartition(index, partitionId)) {
freePartitions.offer(partitionId);
} else {
break;
}
}
int count = group.getPartitionCount(index);
if (plusOneGroupCount > 0 && count == maxPartitionPerGroup) {
if (--plusOneGroupCount == 0) {
// all (avg + 1) partitions owned groups are found
// if there is any group has avg number of partitions in under-loaded queue
// remove it.
Iterator<NodeGroup> underLoaded = underLoadedGroups.iterator();
while (underLoaded.hasNext()) {
if (underLoaded.next().getPartitionCount(index) >= avgPartitionPerGroup) {
underLoaded.remove();
}
}
}
} else if ((plusOneGroupCount > 0 && count < maxPartitionPerGroup)
|| (count < avgPartitionPerGroup)) {
underLoadedGroups.offer(group);
}
}
return plusOneGroupCount;
}
private LinkedList<Integer> getUnownedPartitions(Address[][] state, int replicaIndex) {
LinkedList<Integer> freePartitions = new LinkedList<Integer>();
// if the owner of a partition cannot be found, add the partition to the free partitions queue.
for (int partitionId = 0; partitionId < state.length; partitionId++) {
Address[] replicas = state[partitionId];
if (replicas[replicaIndex] == null) {
freePartitions.add(partitionId);
}
}
Collections.shuffle(freePartitions);
return freePartitions;
}
private void initializeGroupPartitions(Address[][] state, LinkedList<NodeGroup> groups,
int replicaCount, boolean aggressive) {
// reset partition before reuse
for (NodeGroup nodeGroup : groups) {
nodeGroup.resetPartitions();
}
for (int partitionId = 0; partitionId < state.length; partitionId++) {
Address[] replicas = state[partitionId];
for (int replicaIndex = 0; replicaIndex < InternalPartition.MAX_REPLICA_COUNT; replicaIndex++) {
if (replicaIndex >= replicaCount) {
replicas[replicaIndex] = null;
} else {
Address owner = replicas[replicaIndex];
boolean valid = false;
if (owner != null) {
for (NodeGroup nodeGroup : groups) {
if (nodeGroup.hasNode(owner)) {
if (nodeGroup.ownPartition(owner, replicaIndex, partitionId)) {
valid = true;
}
break;
}
}
}
if (!valid) {
replicas[replicaIndex] = null;
} else if (aggressive && replicaIndex < AGGRESSIVE_INDEX_THRESHOLD) {
for (int i = AGGRESSIVE_INDEX_THRESHOLD; i < replicaCount; i++) {
replicas[i] = null;
}
}
}
}
}
}
private LinkedList<NodeGroup> createNodeGroups(Collection<MemberGroup> memberGroups) {
LinkedList<NodeGroup> nodeGroups = new LinkedList<NodeGroup>();
if (memberGroups == null || memberGroups.isEmpty()) return nodeGroups;
for (MemberGroup memberGroup : memberGroups) {
NodeGroup nodeGroup;
if (memberGroup.size() == 0) {
continue;
}
if (memberGroup instanceof SingleMemberGroup || memberGroup.size() == 1) {
nodeGroup = new SingleNodeGroup();
MemberImpl next = (MemberImpl) memberGroup.iterator().next();
nodeGroup.addNode(next.getAddress());
} else {
nodeGroup = new DefaultNodeGroup();
Iterator<Member> iter = memberGroup.iterator();
while (iter.hasNext()) {
MemberImpl next = (MemberImpl) iter.next();
nodeGroup.addNode(next.getAddress());
}
}
nodeGroups.add(nodeGroup);
}
return nodeGroups;
}
private TestResult testArrangement(Address[][] state, Collection<NodeGroup> groups, int partitionCount) {
float ratio = RANGE_CHECK_RATIO;
int avgPartitionPerGroup = partitionCount / groups.size();
int replicaCount = Math.min(groups.size(), InternalPartition.MAX_REPLICA_COUNT);
Set<Address> set = new HashSet<Address>();
for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
Address[] replicas = state[partitionId];
for (int i = 0; i < replicaCount; i++) {
Address owner = replicas[i];
if (owner == null) {
logger.warning("Partition-Arrangement-Test: Owner is null !!! => partition: "
+ partitionId + " replica: " + i);
return TestResult.FAIL;
}
if (set.contains(owner)) {
// Should not happen!
logger.warning("Partition-Arrangement-Test: " +
owner + " has owned multiple replicas of partition: " + partitionId + " replica: " + i);
return TestResult.FAIL;
}
set.add(owner);
}
set.clear();
}
for (NodeGroup group : groups) {
for (int i = 0; i < replicaCount; i++) {
int partitionCountOfGroup = group.getPartitionCount(i);
if (Math.abs(partitionCountOfGroup - avgPartitionPerGroup) <= MIN_AVG_OWNER_DIFF) {
continue;
}
if ((partitionCountOfGroup < avgPartitionPerGroup / ratio)
|| (partitionCountOfGroup > avgPartitionPerGroup * ratio)) {
if (logger.isFinestEnabled()) {
logger.finest("Replica: " + i + ", PartitionCount: "
+ partitionCountOfGroup + ", AvgPartitionCount: " + avgPartitionPerGroup);
}
return TestResult.RETRY;
}
}
}
return TestResult.PASS;
}
// ----- INNER CLASSES -----
private interface StateInitializer {
void initialize(Address[][] state);
}
private static class EmptyStateInitializer implements StateInitializer {
@Override
public void initialize(Address[][] state) {
for (int i = 0; i < state.length; i++) {
state[i] = new Address[InternalPartition.MAX_REPLICA_COUNT];
}
}
}
private static class CopyStateInitializer implements StateInitializer {
private final InternalPartition[] currentState;
CopyStateInitializer(InternalPartition[] currentState) {
this.currentState = currentState;
}
@Override
public void initialize(Address[][] state) {
if (state.length != currentState.length) {
throw new IllegalArgumentException("Partition counts do not match!");
}
for (int partitionId = 0; partitionId < state.length; partitionId++) {
InternalPartition p = currentState[partitionId];
Address[] replicas = new Address[InternalPartition.MAX_REPLICA_COUNT];
state[partitionId] = replicas;
for (int replicaIndex = 0; replicaIndex < InternalPartition.MAX_REPLICA_COUNT; replicaIndex++) {
replicas[replicaIndex] = p.getReplicaAddress(replicaIndex);
}
}
}
}
private enum TestResult {
PASS, RETRY, FAIL
}
private interface NodeGroup {
void addNode(Address address);
boolean hasNode(Address address);
Set<Address> getNodes();
PartitionTable getPartitionTable(Address address);
void resetPartitions();
int getPartitionCount(int index);
boolean containsPartition(Integer partitionId);
boolean ownPartition(Address address, int index, Integer partitionId);
boolean addPartition(int replicaIndex, Integer partitionId);
Iterator<Integer> getPartitionsIterator(int index);
boolean removePartition(int index, Integer partitionId);
void postProcessPartitionTable(int index);
}
private static class DefaultNodeGroup implements NodeGroup {
final PartitionTable groupPartitionTable = new PartitionTable();
final Map<Address, PartitionTable> nodePartitionTables = new HashMap<Address, PartitionTable>();
final Set<Address> nodes = nodePartitionTables.keySet();
final Collection<PartitionTable> nodeTables = nodePartitionTables.values();
final LinkedList<Integer> partitionQ = new LinkedList<Integer>();
@Override
public void addNode(Address address) {
nodePartitionTables.put(address, new PartitionTable());
}
@Override
public boolean hasNode(Address address) {
return nodes.contains(address);
}
@Override
public Set<Address> getNodes() {
return nodes;
}
@Override
public PartitionTable getPartitionTable(Address address) {
return nodePartitionTables.get(address);
}
@Override
public void resetPartitions() {
groupPartitionTable.reset();
partitionQ.clear();
for (PartitionTable table : nodeTables) {
table.reset();
}
}
@Override
public int getPartitionCount(int index) {
return groupPartitionTable.size(index);
}
@Override
public boolean containsPartition(Integer partitionId) {
return groupPartitionTable.contains(partitionId);
}
@Override
public boolean ownPartition(Address address, int index, Integer partitionId) {
if (!hasNode(address)) {
String error = "Address does not belong to this group: " + address.toString();
logger.warning(error);
return false;
}
if (containsPartition(partitionId)) {
if (logger.isFinestEnabled()) {
String error = "Partition[" + partitionId + "] is already owned by this group! " +
"Duplicate!";
logger.finest(error);
}
return false;
}
groupPartitionTable.add(index, partitionId);
return nodePartitionTables.get(address).add(index, partitionId);
}
@Override
public boolean addPartition(int replicaIndex, Integer partitionId) {
if (containsPartition(partitionId)) {
return false;
}
if (groupPartitionTable.add(replicaIndex, partitionId)) {
partitionQ.add(partitionId);
return true;
}
return false;
}
@Override
public Iterator<Integer> getPartitionsIterator(final int index) {
final Iterator<Integer> iter = groupPartitionTable.getPartitions(index).iterator();
return new Iterator<Integer>() {
Integer current = null;
@Override
public boolean hasNext() {
return iter.hasNext();
}
@Override
public Integer next() {
return (current = iter.next());
}
@Override
public void remove() {
iter.remove();
doRemovePartition(index, current);
}
};
}
@Override
public boolean removePartition(int index, Integer partitionId) {
if (groupPartitionTable.remove(index, partitionId)) {
doRemovePartition(index, partitionId);
return true;
}
return false;
}
private void doRemovePartition(int index, Integer partitionId) {
for (PartitionTable table : nodeTables) {
if (table.remove(index, partitionId)) {
break;
}
}
}
@Override
public void postProcessPartitionTable(int index) {
if (nodes.size() == 1) {
PartitionTable table = nodeTables.iterator().next();
while (!partitionQ.isEmpty()) {
table.add(index, partitionQ.poll());
}
} else {
int totalCount = getPartitionCount(index);
int avgCount = totalCount / nodes.size();
List<PartitionTable> underLoadedStates = new LinkedList<PartitionTable>();
for (PartitionTable table : nodeTables) {
Set<Integer> partitions = table.getPartitions(index);
if (partitions.size() > avgCount) {
Iterator<Integer> iter = partitions.iterator();
while (partitions.size() > avgCount) {
Integer partitionId = iter.next();
iter.remove();
partitionQ.add(partitionId);
}
} else {
underLoadedStates.add(table);
}
}
if (!partitionQ.isEmpty()) {
for (PartitionTable table : underLoadedStates) {
while (table.size(index) < avgCount) {
table.add(index, partitionQ.poll());
}
}
}
while (!partitionQ.isEmpty()) {
for (PartitionTable table : nodeTables) {
table.add(index, partitionQ.poll());
if (partitionQ.isEmpty()) {
break;
}
}
}
}
}
@Override
public String toString() {
return "DefaultNodeGroupRegistry [nodes=" + nodes + "]";
}
}
private static class SingleNodeGroup implements NodeGroup {
final PartitionTable nodeTable = new PartitionTable();
Address address = null;
Set<Address> nodes;
@Override
public void addNode(Address addr) {
if (address != null) {
logger.warning("Single node group already has an address => " + address);
return;
}
this.address = addr;
nodes = Collections.singleton(address);
}
@Override
public boolean hasNode(Address address) {
return this.address != null && this.address.equals(address);
}
@Override
public Set<Address> getNodes() {
return nodes;
}
@Override
public PartitionTable getPartitionTable(Address address) {
return hasNode(address) ? nodeTable : null;
}
@Override
public void resetPartitions() {
nodeTable.reset();
}
@Override
public int getPartitionCount(int index) {
return nodeTable.size(index);
}
@Override
public boolean containsPartition(Integer partitionId) {
return nodeTable.contains(partitionId);
}
@Override
public boolean ownPartition(Address address, int index, Integer partitionId) {
if (!hasNode(address)) {
String error = address + " is different from this node's " + this.address;
logger.warning(error);
return false;
}
if (containsPartition(partitionId)) {
if (logger.isFinestEnabled()) {
String error = "Partition[" + partitionId + "] is already owned by this node " +
address + "! Duplicate!";
logger.finest(error);
}
return false;
}
return nodeTable.add(index, partitionId);
}
@Override
public boolean addPartition(int replicaIndex, Integer partitionId) {
if (containsPartition(partitionId)) {
return false;
}
return nodeTable.add(replicaIndex, partitionId);
}
@Override
public Iterator<Integer> getPartitionsIterator(int index) {
return nodeTable.getPartitions(index).iterator();
}
@Override
public boolean removePartition(int index, Integer partitionId) {
return nodeTable.remove(index, partitionId);
}
@Override
public void postProcessPartitionTable(int index) {
}
@Override
public String toString() {
return "SingleNodeGroupRegistry [address=" + address + "]";
}
}
@SuppressWarnings("unchecked")
private static class PartitionTable {
final Set<Integer>[] partitions = new Set[InternalPartition.MAX_REPLICA_COUNT];
Set<Integer> getPartitions(int index) {
check(index);
Set<Integer> set = partitions[index];
if (set == null) {
set = new HashSet<Integer>();
partitions[index] = set;
}
return set;
}
boolean add(int index, Integer partitionId) {
return getPartitions(index).add(partitionId);
}
boolean contains(int index, Integer partitionId) {
return getPartitions(index).contains(partitionId);
}
boolean contains(Integer partitionId) {
for (Set<Integer> set : partitions) {
if (set != null && set.contains(partitionId)) {
return true;
}
}
return false;
}
boolean remove(int index, Integer partitionId) {
return getPartitions(index).remove(partitionId);
}
int size(int index) {
return getPartitions(index).size();
}
void reset() {
for (Set<Integer> set : partitions) {
if (set != null) {
set.clear();
}
}
}
private void check(int index) {
if (index < 0 || index >= InternalPartition.MAX_REPLICA_COUNT) {
throw new ArrayIndexOutOfBoundsException(index);
}
}
}
}
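The load targets in tryArrange() come from simple integer arithmetic; a worked example with Hazelcast's default partition count of 271 spread over four member groups:

public class PartitionMathDemo {
    public static void main(String[] args) {
        int partitionCount = 271;
        int groupSize = 4;
        int avgPartitionPerGroup = partitionCount / groupSize;                     // 67
        int plusOneGroupCount = partitionCount - avgPartitionPerGroup * groupSize; // 271 - 268 = 3
        // three groups end up owning avg+1 (68) partitions, one group owns avg (67)
        System.out.println("avg=" + avgPartitionPerGroup
                + " groups-at-avg+1=" + plusOneGroupCount
                + " groups-at-avg=" + (groupSize - plusOneGroupCount));
    }
}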
| 1no label
|
hazelcast_src_main_java_com_hazelcast_partition_impl_PartitionStateGeneratorImpl.java
|
1,475 |
public class RoutingNode implements Iterable<MutableShardRouting> {
private final String nodeId;
private final DiscoveryNode node;
private final List<MutableShardRouting> shards;
public RoutingNode(String nodeId, DiscoveryNode node) {
this(nodeId, node, new ArrayList<MutableShardRouting>());
}
public RoutingNode(String nodeId, DiscoveryNode node, List<MutableShardRouting> shards) {
this.nodeId = nodeId;
this.node = node;
this.shards = shards;
}
@Override
public Iterator<MutableShardRouting> iterator() {
return Iterators.unmodifiableIterator(shards.iterator());
}
Iterator<MutableShardRouting> mutableIterator() {
return shards.iterator();
}
/**
* Returns the nodes {@link DiscoveryNode}.
*
* @return discoveryNode of this node
*/
public DiscoveryNode node() {
return this.node;
}
/**
* Get the id of this node
* @return id of the node
*/
public String nodeId() {
return this.nodeId;
}
public int size() {
return shards.size();
}
/**
* Add a new shard to this node
* @param shard Shard to create on this node
*/
void add(MutableShardRouting shard) {
// TODO use Set with ShardIds for faster lookup.
for (MutableShardRouting shardRouting : shards) {
if (shardRouting.shardId().equals(shard.shardId())) {
throw new ElasticsearchIllegalStateException("Trying to add a shard [" + shard.shardId().index().name() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists");
}
}
shards.add(shard);
}
/**
* Determine the number of shards with a specific state
* @param states set of states which should be counted
* @return number of shards
*/
public int numberOfShardsWithState(ShardRoutingState... states) {
int count = 0;
for (MutableShardRouting shardEntry : this) {
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
count++;
}
}
}
return count;
}
/**
* Determine the shards with a specific state
* @param states set of states which should be listed
* @return List of shards
*/
public List<MutableShardRouting> shardsWithState(ShardRoutingState... states) {
List<MutableShardRouting> shards = newArrayList();
for (MutableShardRouting shardEntry : this) {
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
shards.add(shardEntry);
}
}
}
return shards;
}
/**
* Determine the shards of an index with a specific state
* @param index id of the index
* @param states set of states which should be listed
* @return a list of shards
*/
public List<MutableShardRouting> shardsWithState(String index, ShardRoutingState... states) {
List<MutableShardRouting> shards = newArrayList();
for (MutableShardRouting shardEntry : this) {
if (!shardEntry.index().equals(index)) {
continue;
}
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
shards.add(shardEntry);
}
}
}
return shards;
}
/**
* The number of shards on this node that will not be eventually relocated.
*/
public int numberOfOwningShards() {
int count = 0;
for (MutableShardRouting shardEntry : this) {
if (shardEntry.state() != ShardRoutingState.RELOCATING) {
count++;
}
}
return count;
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append("-----node_id[").append(nodeId).append("][" + (node == null ? "X" : "V") + "]\n");
for (MutableShardRouting entry : shards) {
sb.append("--------").append(entry.shortSummary()).append('\n');
}
return sb.toString();
}
public MutableShardRouting get(int i) {
return shards.get(i);
}
public Collection<MutableShardRouting> copyShards() {
return new ArrayList<MutableShardRouting>(shards);
}
public boolean isEmpty() {
return shards.isEmpty();
}
}
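A hedged usage sketch of the counting helpers above; 'routingNode' is assumed to come from a cluster state's routing nodes view:

// Inspect a node's shard load during allocation decisions.
int started = routingNode.numberOfShardsWithState(ShardRoutingState.STARTED);
int moving = routingNode.numberOfShardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING);
// shards that will still live on this node once relocations complete
int owning = routingNode.numberOfOwningShards();
System.out.println(routingNode.nodeId() + ": started=" + started + " moving=" + moving + " owning=" + owning);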
| 0true
|
src_main_java_org_elasticsearch_cluster_routing_RoutingNode.java
|
734 |
public class DeleteByQueryResponse extends ActionResponse implements Iterable<IndexDeleteByQueryResponse> {
private Map<String, IndexDeleteByQueryResponse> indices = newHashMap();
DeleteByQueryResponse() {
}
@Override
public Iterator<IndexDeleteByQueryResponse> iterator() {
return indices.values().iterator();
}
/**
* The responses from all the different indices.
*/
public Map<String, IndexDeleteByQueryResponse> getIndices() {
return indices;
}
/**
* The response of a specific index.
*/
public IndexDeleteByQueryResponse getIndex(String index) {
return indices.get(index);
}
public RestStatus status() {
RestStatus status = RestStatus.OK;
for (IndexDeleteByQueryResponse indexResponse : indices.values()) {
if (indexResponse.getFailedShards() > 0) {
RestStatus indexStatus = indexResponse.getFailures()[0].status();
if (indexResponse.getFailures().length > 1) {
for (int i = 1; i < indexResponse.getFailures().length; i++) {
if (indexResponse.getFailures()[i].status().getStatus() >= 500) {
indexStatus = indexResponse.getFailures()[i].status();
}
}
}
if (status.getStatus() < indexStatus.getStatus()) {
status = indexStatus;
}
}
}
return status;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
IndexDeleteByQueryResponse response = new IndexDeleteByQueryResponse();
response.readFrom(in);
indices.put(response.getIndex(), response);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(indices.size());
for (IndexDeleteByQueryResponse indexResponse : indices.values()) {
indexResponse.writeTo(out);
}
}
}
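The escalation rule in status() is worth spelling out: within an index, the first shard failure's status is used unless a later failure is a 5xx; across indices, the highest status wins. A self-contained illustration with plain status codes (the failure values are hypothetical):

public class StatusEscalationDemo {
    static int pickIndexStatus(int[] failureStatuses) {
        int status = failureStatuses[0];
        for (int i = 1; i < failureStatuses.length; i++) {
            if (failureStatuses[i] >= 500) {
                status = failureStatuses[i]; // server errors take precedence
            }
        }
        return status;
    }

    public static void main(String[] args) {
        int overall = 200; // start from OK
        int[][] perIndexFailures = { {404, 503}, {409} }; // hypothetical shard failures per index
        for (int[] failures : perIndexFailures) {
            int indexStatus = pickIndexStatus(failures);
            if (indexStatus > overall) {
                overall = indexStatus; // worst index status wins overall
            }
        }
        System.out.println(overall); // 503
    }
}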
| 0true
|
src_main_java_org_elasticsearch_action_deletebyquery_DeleteByQueryResponse.java
|
476 |
public interface EventHandler<E> {
void handle(E event);
/**
* This method is called when the registration request's response is successfully returned from the node.
*
* Note that this method is also called when the node the listener was first registered on dies
* and the listener is re-registered on another node.
*/
void onListenerRegister();
}
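A minimal sketch of an implementation; real handlers typically use onListenerRegister() to reset local state so events from a previous registration are not replayed:

public class LoggingHandler<E> implements EventHandler<E> {
    @Override
    public void handle(E event) {
        System.out.println("event: " + event);
    }

    @Override
    public void onListenerRegister() {
        // fires on every successful (re-)registration, including failover
        // to a second node; clear any registration-scoped state here
        System.out.println("listener (re-)registered");
    }
}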
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_spi_EventHandler.java
|
538 |
public static class MyXid implements Xid {
@Override
public int getFormatId() {
return 42;
}
@Override
public byte[] getGlobalTransactionId() {
return "GlobalTransactionId".getBytes();
}
@Override
public byte[] getBranchQualifier() {
return "BranchQualifier".getBytes();
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_txn_ClientXaTest.java
|
1,205 |
intIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntIntOpenHashMap>() {
@Override
public IntIntOpenHashMap newInstance(int sizing) {
return new IntIntOpenHashMap(size(sizing));
}
@Override
public void clear(IntIntOpenHashMap value) {
value.clear();
}
});
| 0true
|
src_main_java_org_elasticsearch_cache_recycler_CacheRecycler.java
|
1,301 |
public class ClusterState implements ToXContent {
public interface Custom {
interface Factory<T extends Custom> {
String type();
T readFrom(StreamInput in) throws IOException;
void writeTo(T customState, StreamOutput out) throws IOException;
void toXContent(T customState, XContentBuilder builder, ToXContent.Params params);
}
}
public static Map<String, Custom.Factory> customFactories = new HashMap<String, Custom.Factory>();
/**
* Register a custom index meta data factory. Make sure to call it from a static block.
*/
public static void registerFactory(String type, Custom.Factory factory) {
customFactories.put(type, factory);
}
@Nullable
public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) {
return customFactories.get(type);
}
public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException {
Custom.Factory<T> factory = customFactories.get(type);
if (factory == null) {
throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]");
}
return factory;
}
private final long version;
private final RoutingTable routingTable;
private final DiscoveryNodes nodes;
private final MetaData metaData;
private final ClusterBlocks blocks;
private final AllocationExplanation allocationExplanation;
private final ImmutableOpenMap<String, Custom> customs;
// built on demand
private volatile RoutingNodes routingNodes;
private SettingsFilter settingsFilter;
public ClusterState(long version, ClusterState state) {
this(version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.allocationExplanation(), state.customs());
}
public ClusterState(long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, AllocationExplanation allocationExplanation, ImmutableOpenMap<String, Custom> customs) {
this.version = version;
this.metaData = metaData;
this.routingTable = routingTable;
this.nodes = nodes;
this.blocks = blocks;
this.allocationExplanation = allocationExplanation;
this.customs = customs;
}
public long version() {
return this.version;
}
public long getVersion() {
return version();
}
public DiscoveryNodes nodes() {
return this.nodes;
}
public DiscoveryNodes getNodes() {
return nodes();
}
public MetaData metaData() {
return this.metaData;
}
public MetaData getMetaData() {
return metaData();
}
public RoutingTable routingTable() {
return routingTable;
}
public RoutingTable getRoutingTable() {
return routingTable();
}
public RoutingNodes routingNodes() {
return routingTable.routingNodes(this);
}
public RoutingNodes getRoutingNodes() {
return readOnlyRoutingNodes();
}
public ClusterBlocks blocks() {
return this.blocks;
}
public ClusterBlocks getBlocks() {
return blocks;
}
public AllocationExplanation allocationExplanation() {
return this.allocationExplanation;
}
public AllocationExplanation getAllocationExplanation() {
return allocationExplanation();
}
public ImmutableOpenMap<String, Custom> customs() {
return this.customs;
}
public ImmutableOpenMap<String, Custom> getCustoms() {
return this.customs;
}
/**
* Returns a built (on demand) routing nodes view of the routing table. <b>NOTE, the routing nodes
* are mutable, use them just for read operations</b>
*/
public RoutingNodes readOnlyRoutingNodes() {
if (routingNodes != null) {
return routingNodes;
}
routingNodes = routingTable.routingNodes(this);
return routingNodes;
}
public ClusterState settingsFilter(SettingsFilter settingsFilter) {
this.settingsFilter = settingsFilter;
return this;
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append(nodes().prettyPrint());
sb.append(routingTable().prettyPrint());
sb.append(readOnlyRoutingNodes().prettyPrint());
return sb.toString();
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
Set<String> metrics = Strings.splitStringByCommaToSet(params.param("metric", "_all"));
boolean isAllMetricsOnly = metrics.size() == 1 && metrics.contains("_all");
if (isAllMetricsOnly || metrics.contains("nodes")) {
builder.field("master_node", nodes().masterNodeId());
}
if (isAllMetricsOnly || metrics.contains("blocks")) {
builder.startObject("blocks");
if (!blocks().global().isEmpty()) {
builder.startObject("global");
for (ClusterBlock block : blocks().global()) {
block.toXContent(builder, params);
}
builder.endObject();
}
if (!blocks().indices().isEmpty()) {
builder.startObject("indices");
for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks().indices().entrySet()) {
builder.startObject(entry.getKey());
for (ClusterBlock block : entry.getValue()) {
block.toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
}
builder.endObject();
}
// nodes
if (isAllMetricsOnly || metrics.contains("nodes")) {
builder.startObject("nodes");
for (DiscoveryNode node : nodes()) {
builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("name", node.name());
builder.field("transport_address", node.address().toString());
builder.startObject("attributes");
for (Map.Entry<String, String> attr : node.attributes().entrySet()) {
builder.field(attr.getKey(), attr.getValue());
}
builder.endObject();
builder.endObject();
}
builder.endObject();
}
// meta data
if (isAllMetricsOnly || metrics.contains("metadata")) {
builder.startObject("metadata");
builder.startObject("templates");
for (ObjectCursor<IndexTemplateMetaData> cursor : metaData().templates().values()) {
IndexTemplateMetaData templateMetaData = cursor.value;
builder.startObject(templateMetaData.name(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("template", templateMetaData.template());
builder.field("order", templateMetaData.order());
builder.startObject("settings");
Settings settings = templateMetaData.settings();
if (settingsFilter != null) {
settings = settingsFilter.filterSettings(settings);
}
settings.toXContent(builder, params);
builder.endObject();
builder.startObject("mappings");
for (ObjectObjectCursor<String, CompressedString> cursor1 : templateMetaData.mappings()) {
byte[] mappingSource = cursor1.value.uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor1.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor1.key);
}
builder.field(cursor1.key);
builder.map(mapping);
}
builder.endObject();
builder.endObject();
}
builder.endObject();
builder.startObject("indices");
for (IndexMetaData indexMetaData : metaData()) {
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
builder.startObject("settings");
Settings settings = indexMetaData.settings();
if (settingsFilter != null) {
settings = settingsFilter.filterSettings(settings);
}
settings.toXContent(builder, params);
builder.endObject();
builder.startObject("mappings");
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
byte[] mappingSource = cursor.value.source().uncompressed();
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
Map<String, Object> mapping = parser.map();
if (mapping.size() == 1 && mapping.containsKey(cursor.key)) {
// the type name is the root value, reduce it
mapping = (Map<String, Object>) mapping.get(cursor.key);
}
builder.field(cursor.key);
builder.map(mapping);
}
builder.endObject();
builder.startArray("aliases");
for (ObjectCursor<String> cursor : indexMetaData.aliases().keys()) {
builder.value(cursor.value);
}
builder.endArray();
builder.endObject();
}
builder.endObject();
for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
builder.startObject(cursor.key);
MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
builder.endObject();
}
// routing table
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startObject("routing_table");
builder.startObject("indices");
for (IndexRoutingTable indexRoutingTable : routingTable()) {
builder.startObject(indexRoutingTable.index(), XContentBuilder.FieldCaseConversion.NONE);
builder.startObject("shards");
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id()));
for (ShardRouting shardRouting : indexShardRoutingTable) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
}
builder.endObject();
builder.endObject();
}
builder.endObject();
builder.endObject();
}
// routing nodes
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startObject("routing_nodes");
builder.startArray("unassigned");
for (ShardRouting shardRouting : readOnlyRoutingNodes().unassigned()) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
builder.startObject("nodes");
for (RoutingNode routingNode : readOnlyRoutingNodes()) {
builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE);
for (ShardRouting shardRouting : routingNode) {
shardRouting.toXContent(builder, params);
}
builder.endArray();
}
builder.endObject();
builder.endObject();
}
if (isAllMetricsOnly || metrics.contains("routing_table")) {
builder.startArray("allocations");
for (Map.Entry<ShardId, List<AllocationExplanation.NodeExplanation>> entry : allocationExplanation().explanations().entrySet()) {
builder.startObject();
builder.field("index", entry.getKey().index().name());
builder.field("shard", entry.getKey().id());
builder.startArray("explanations");
for (AllocationExplanation.NodeExplanation nodeExplanation : entry.getValue()) {
builder.field("desc", nodeExplanation.description());
if (nodeExplanation.node() != null) {
builder.startObject("node");
builder.field("id", nodeExplanation.node().id());
builder.field("name", nodeExplanation.node().name());
builder.endObject();
}
}
builder.endArray();
builder.endObject();
}
builder.endArray();
}
if (isAllMetricsOnly || metrics.contains("customs")) {
for (ObjectObjectCursor<String, Custom> cursor : customs) {
builder.startObject(cursor.key);
lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params);
builder.endObject();
}
}
return builder;
}
public static Builder builder() {
return new Builder();
}
public static Builder builder(ClusterState state) {
return new Builder(state);
}
public static class Builder {
private long version = 0;
private MetaData metaData = MetaData.EMPTY_META_DATA;
private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE;
private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES;
private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK;
private AllocationExplanation allocationExplanation = AllocationExplanation.EMPTY;
private final ImmutableOpenMap.Builder<String, Custom> customs;
public Builder() {
customs = ImmutableOpenMap.builder();
}
public Builder(ClusterState state) {
this.version = state.version();
this.nodes = state.nodes();
this.routingTable = state.routingTable();
this.metaData = state.metaData();
this.blocks = state.blocks();
this.allocationExplanation = state.allocationExplanation();
this.customs = ImmutableOpenMap.builder(state.customs());
}
public Builder nodes(DiscoveryNodes.Builder nodesBuilder) {
return nodes(nodesBuilder.build());
}
public Builder nodes(DiscoveryNodes nodes) {
this.nodes = nodes;
return this;
}
public Builder routingTable(RoutingTable.Builder routingTable) {
return routingTable(routingTable.build());
}
public Builder routingResult(RoutingAllocation.Result routingResult) {
this.routingTable = routingResult.routingTable();
this.allocationExplanation = routingResult.explanation();
return this;
}
public Builder routingTable(RoutingTable routingTable) {
this.routingTable = routingTable;
return this;
}
public Builder metaData(MetaData.Builder metaDataBuilder) {
return metaData(metaDataBuilder.build());
}
public Builder metaData(MetaData metaData) {
this.metaData = metaData;
return this;
}
public Builder blocks(ClusterBlocks.Builder blocksBuilder) {
return blocks(blocksBuilder.build());
}
public Builder blocks(ClusterBlocks block) {
this.blocks = block;
return this;
}
public Builder allocationExplanation(AllocationExplanation allocationExplanation) {
this.allocationExplanation = allocationExplanation;
return this;
}
public Builder version(long version) {
this.version = version;
return this;
}
public Custom getCustom(String type) {
return customs.get(type);
}
public Builder putCustom(String type, Custom custom) {
customs.put(type, custom);
return this;
}
public Builder removeCustom(String type) {
customs.remove(type);
return this;
}
public ClusterState build() {
return new ClusterState(version, metaData, routingTable, nodes, blocks, allocationExplanation, customs.build());
}
public static byte[] toBytes(ClusterState state) throws IOException {
BytesStreamOutput os = new BytesStreamOutput();
writeTo(state, os);
return os.bytes().toBytes();
}
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
return readFrom(new BytesStreamInput(data, false), localNode);
}
public static void writeTo(ClusterState state, StreamOutput out) throws IOException {
out.writeLong(state.version());
MetaData.Builder.writeTo(state.metaData(), out);
RoutingTable.Builder.writeTo(state.routingTable(), out);
DiscoveryNodes.Builder.writeTo(state.nodes(), out);
ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out);
state.allocationExplanation().writeTo(out);
out.writeVInt(state.customs().size());
for (ObjectObjectCursor<String, Custom> cursor : state.customs()) {
out.writeString(cursor.key);
lookupFactorySafe(cursor.key).writeTo(cursor.value, out);
}
}
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
Builder builder = new Builder();
builder.version = in.readLong();
builder.metaData = MetaData.Builder.readFrom(in);
builder.routingTable = RoutingTable.Builder.readFrom(in);
builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode);
builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in);
builder.allocationExplanation = AllocationExplanation.readAllocationExplanation(in);
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
}
return builder.build();
}
}
}
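A hedged sketch of the builder in use — derive a new state from the current one, as a master-side update would; 'currentState' and 'newRoutingTable' are assumed to exist in the caller's scope:

ClusterState newState = ClusterState.builder(currentState)
        .version(currentState.version() + 1)   // new states carry a higher version
        .routingTable(newRoutingTable)         // assumed to be built elsewhere
        .build();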
| 1no label
|
src_main_java_org_elasticsearch_cluster_ClusterState.java
|
200 |
public class Router {
private final LoadBalancer loadBalancer;
public Router(LoadBalancer loadBalancer) {
this.loadBalancer = loadBalancer;
}
public Address next() {
final MemberImpl member = (MemberImpl) loadBalancer.next();
if (member == null) {
return null;
} else {
return member.getAddress();
}
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_connection_Router.java
|
373 |
public class AspectUtil {
public static Object exposeRootBean(Object managedBean) {
try {
if (AopUtils.isAopProxy(managedBean) && managedBean instanceof Advised) {
Advised advised = (Advised) managedBean;
managedBean = advised.getTargetSource().getTarget();
}
} catch (Exception e) {
throw new RuntimeException(e);
}
return managedBean;
}
}
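A usage sketch; 'proxiedService' is a hypothetical Spring-advised bean:

// Recover the target instance behind a Spring AOP proxy, e.g. before
// registering the bean with JMX; non-proxied objects pass through unchanged.
Object root = AspectUtil.exposeRootBean(proxiedService);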
| 0true
|
common_src_main_java_org_broadleafcommerce_common_jmx_AspectUtil.java
|
5,124 |
public abstract class Aggregator implements Releasable, ReaderContextAware {
/**
* Defines the nature of the aggregator's aggregation execution when nested in other aggregators and the buckets they create.
*/
public static enum BucketAggregationMode {
/**
* In this mode, a new aggregator instance will be created per bucket (created by the parent aggregator)
*/
PER_BUCKET,
/**
* In this mode, a single aggregator instance will be created per parent aggregator, that will handle the aggregations of all its buckets.
*/
MULTI_BUCKETS
}
protected final String name;
protected final Aggregator parent;
protected final AggregationContext context;
protected final int depth;
protected final long estimatedBucketCount;
protected final BucketAggregationMode bucketAggregationMode;
protected final AggregatorFactories factories;
protected final Aggregator[] subAggregators;
/**
* Constructs a new Aggregator.
*
* @param name The name of the aggregation
* @param bucketAggregationMode The nature of execution as a sub-aggregator (see {@link BucketAggregationMode})
* @param factories The factories for all the sub-aggregators under this aggregator
* @param estimatedBucketsCount When served as a sub-aggregator, indicates how many buckets the parent aggregator will generate.
* @param context The aggregation context
* @param parent The parent aggregator (may be {@code null} for top level aggregators)
*/
protected Aggregator(String name, BucketAggregationMode bucketAggregationMode, AggregatorFactories factories, long estimatedBucketsCount, AggregationContext context, Aggregator parent) {
this.name = name;
this.parent = parent;
this.estimatedBucketCount = estimatedBucketsCount;
this.context = context;
this.depth = parent == null ? 0 : 1 + parent.depth();
this.bucketAggregationMode = bucketAggregationMode;
assert factories != null : "sub-factories provided to BucketAggregator must not be null, use AggregatorFactories.EMPTY instead";
this.factories = factories;
this.subAggregators = factories.createSubAggregators(this, estimatedBucketsCount);
}
/**
* @return The name of the aggregation.
*/
public String name() {
return name;
}
/** Return the estimated number of buckets. */
public final long estimatedBucketCount() {
return estimatedBucketCount;
}
/** Return the depth of this aggregator in the aggregation tree. */
public final int depth() {
return depth;
}
/**
* @return The parent aggregator of this aggregator. Aggregations are hierarchical in the sense that some can
* be composed out of others (more specifically, bucket aggregations can define other aggregations that will
* be aggregated per bucket). This method returns the direct parent aggregator that contains this aggregator, or
* {@code null} if there is none (meaning, this aggregator is a top level one)
*/
public Aggregator parent() {
return parent;
}
public Aggregator[] subAggregators() {
return subAggregators;
}
/**
* @return The current aggregation context.
*/
public AggregationContext context() {
return context;
}
/**
* @return The bucket aggregation mode of this aggregator. This mode defines the nature in which the aggregation is executed
* @see BucketAggregationMode
*/
public BucketAggregationMode bucketAggregationMode() {
return bucketAggregationMode;
}
/**
* @return Whether this aggregator is in the state where it can collect documents. Some aggregators can do their aggregations without
* actually collecting documents, for example, an aggregator that computes stats over unmapped fields doesn't need to collect
* anything as it knows to just return "empty" stats as the aggregation result.
*/
public abstract boolean shouldCollect();
/**
* Called during the query phase, to collect & aggregate the given document.
*
* @param doc The document to be collected/aggregated
* @param owningBucketOrdinal The ordinal of the bucket this aggregator belongs to, assuming this aggregator is not a top level aggregator.
* Typically, aggregators with {@code #bucketAggregationMode} set to {@link BucketAggregationMode#MULTI_BUCKETS}
* will heavily depend on this ordinal. Other aggregators may or may not use it and can see this ordinal as just
* an extra information for the aggregation context. For top level aggregators, the ordinal will always be
* equal to 0.
* @throws IOException
*/
public abstract void collect(int doc, long owningBucketOrdinal) throws IOException;
/**
* Called after collection of all document is done.
*/
public final void postCollection() {
for (int i = 0; i < subAggregators.length; i++) {
subAggregators[i].postCollection();
}
doPostCollection();
}
/** Called upon release of the aggregator. */
@Override
public boolean release() {
boolean success = false;
try {
doRelease();
success = true;
} finally {
Releasables.release(success, subAggregators);
}
return true;
}
/** Release instance-specific data. */
protected void doRelease() {}
/**
* Can be overridden by aggregator implementations to be called back when the collection phase ends.
*/
protected void doPostCollection() {
}
/**
* @return The aggregated & built aggregation
*/
public abstract InternalAggregation buildAggregation(long owningBucketOrdinal);
public abstract InternalAggregation buildEmptyAggregation();
protected final InternalAggregations buildEmptySubAggregations() {
List<InternalAggregation> aggs = new ArrayList<InternalAggregation>();
for (Aggregator aggregator : subAggregators) {
aggs.add(aggregator.buildEmptyAggregation());
}
return new InternalAggregations(aggs);
}
/**
* Parses the aggregation request and creates the appropriate aggregator factory for it.
*
* @see {@link AggregatorFactory}
*/
public static interface Parser {
/**
* @return The aggregation type this parser is associated with.
*/
String type();
/**
* Returns the aggregator factory with which this parser is associated, may return {@code null} indicating the
* aggregation should be skipped (e.g. when trying to aggregate on unmapped fields).
*
* @param aggregationName The name of the aggregation
* @param parser The xcontent parser
* @param context The search context
* @return The resolved aggregator factory or {@code null} in case the aggregation should be skipped
* @throws java.io.IOException When parsing fails
*/
AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException;
}
}
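The lifecycle contract above — collect per document and bucket ordinal, then postCollection, then buildAggregation per ordinal — can be illustrated without the Elasticsearch internals. A toy, self-contained counter, not the real base class:

import java.util.ArrayList;
import java.util.List;

public class LifecycleDemo {
    interface MiniAggregator {
        void collect(int doc, long owningBucketOrdinal);
        void postCollection();
        long buildAggregation(long owningBucketOrdinal);
    }

    static class CountAggregator implements MiniAggregator {
        // one counter slot per bucket ordinal (MULTI_BUCKETS-style reuse)
        private final List<Long> counts = new ArrayList<Long>();

        public void collect(int doc, long owningBucketOrdinal) {
            while (counts.size() <= owningBucketOrdinal) {
                counts.add(0L);
            }
            int i = (int) owningBucketOrdinal;
            counts.set(i, counts.get(i) + 1);
        }

        public void postCollection() {
            // nothing to flush in this toy example
        }

        public long buildAggregation(long owningBucketOrdinal) {
            return counts.get((int) owningBucketOrdinal);
        }
    }

    public static void main(String[] args) {
        CountAggregator agg = new CountAggregator();
        agg.collect(0, 0); agg.collect(1, 0); agg.collect(2, 1);
        agg.postCollection();
        System.out.println(agg.buildAggregation(0) + " / " + agg.buildAggregation(1)); // 2 / 1
    }
}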
| 1no label
|
src_main_java_org_elasticsearch_search_aggregations_Aggregator.java
|
136 |
class FindIf extends Visitor {
Tree.IfStatement result;
@Override
public void visit(Tree.IfStatement that) {
super.visit(that);
if (that.getIfClause()!=null &&
that.getIfClause().getBlock()
.getStatements().contains(statement)) {
result = that;
}
if (that.getElseClause()!=null &&
that.getElseClause().getBlock()
.getStatements().contains(statement)) {
result = that;
}
}
}
| 0true
|
plugins_com.redhat.ceylon.eclipse.ui_src_com_redhat_ceylon_eclipse_code_correct_InvertIfElseProposal.java
|
158 |
public abstract class MultiPartitionClientRequest extends ClientRequest {
@Override
final void process() throws Exception {
ClientEndpoint endpoint = getEndpoint();
OperationFactory operationFactory = new OperationFactoryWrapper(createOperationFactory(), endpoint.getUuid());
Map<Integer, Object> map = clientEngine.invokeOnPartitions(getServiceName(), operationFactory, getPartitions());
Object result = reduce(map);
endpoint.sendResponse(result, getCallId());
}
protected abstract OperationFactory createOperationFactory();
protected abstract Object reduce(Map<Integer, Object> map);
public abstract Collection<Integer> getPartitions();
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_MultiPartitionClientRequest.java
|
574 |
public abstract class AbstractVendorService {
private static final Log LOG = LogFactory.getLog(AbstractVendorService.class);
private static final String POST_METHOD = "POST";
protected InputStream postMessage(Map<String, String>content, URL destination, String encodeCharset) throws IOException {
HttpURLConnection connection = (HttpURLConnection) destination.openConnection();
connection.setDoInput(true);
connection.setDoOutput(true);
connection.setRequestMethod(POST_METHOD);
OutputStreamWriter osw = null;
try {
osw = new OutputStreamWriter(connection.getOutputStream());
boolean isFirst = true;
for (String key : content.keySet()) {
if (!isFirst) {
osw.write("&");
}
isFirst = false;
String value = content.get(key);
osw.write(URLEncoder.encode(key, encodeCharset));
osw.write("=");
osw.write(URLEncoder.encode(value, encodeCharset));
}
osw.flush();
osw.close();
} catch (IOException e) {
// We'll try to avoid stopping processing and just log the error if the OutputStream doesn't close
LOG.error("Problem closing the OuputStream to destination: " + destination.toExternalForm(), e);
} finally {
if (osw != null) {
try { osw.close(); } catch (Throwable e) {}
}
}
return new BufferedInputStream(connection.getInputStream());
}
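// Illustrative call from a subclass (URL, parameter name, and charset are
// assumptions):
//   InputStream response = postMessage(
//           Collections.singletonMap("TRANSACTION_ID", "1234"),
//           new URL("https://vendor.example.com/process"), "UTF-8");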
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_vendor_service_AbstractVendorService.java
|
529 |
public class EnvironmentFactoryBean implements FactoryBean {
private String className;
public EnvironmentFactoryBean(String className) {
this.className = className;
}
public Object getObject() throws Exception {
return Class.forName(className).newInstance();
}
@SuppressWarnings("unchecked")
public Class getObjectType() {
try {
return Class.forName(className);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
public boolean isSingleton() {
return false;
}
}
| 0true
|
common_src_main_java_org_broadleafcommerce_common_util_EnvironmentFactoryBean.java
|
86 |
public interface OConsoleReader {
public String readLine();
public void setConsole(OConsoleApplication console);
public OConsoleApplication getConsole();
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_OConsoleReader.java
|
331 |
new Thread() {
public void run() {
boolean result = map.tryPut("key1", "value3", 1, TimeUnit.SECONDS);
if (!result) {
latch.countDown();
}
}
}.start();
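// In this test fragment the put is expected to time out after one second
// (presumably "key1" is locked by another thread), which releases the latch.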
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_map_ClientMapTest.java
|
1,030 |
public class Config {
private URL configurationUrl;
private File configurationFile;
private ClassLoader classLoader;
private Properties properties = new Properties();
private String instanceName = null;
private GroupConfig groupConfig = new GroupConfig();
private NetworkConfig networkConfig = new NetworkConfig();
private final Map<String, MapConfig> mapConfigs = new ConcurrentHashMap<String, MapConfig>();
private final Map<String, TopicConfig> topicConfigs = new ConcurrentHashMap<String, TopicConfig>();
private final Map<String, QueueConfig> queueConfigs = new ConcurrentHashMap<String, QueueConfig>();
private final Map<String, MultiMapConfig> multiMapConfigs = new ConcurrentHashMap<String, MultiMapConfig>();
private final Map<String, ListConfig> listConfigs = new ConcurrentHashMap<String, ListConfig>();
private final Map<String, SetConfig> setConfigs = new ConcurrentHashMap<String, SetConfig>();
private final Map<String, ExecutorConfig> executorConfigs = new ConcurrentHashMap<String, ExecutorConfig>();
private final Map<String, SemaphoreConfig> semaphoreConfigs = new ConcurrentHashMap<String, SemaphoreConfig>();
private final Map<String, ReplicatedMapConfig> replicatedMapConfigs = new ConcurrentHashMap<String, ReplicatedMapConfig>();
private final Map<String, WanReplicationConfig> wanReplicationConfigs = new ConcurrentHashMap<String, WanReplicationConfig>();
private final Map<String, JobTrackerConfig> jobTrackerConfigs = new ConcurrentHashMap<String, JobTrackerConfig>();
private ServicesConfig servicesConfig = new ServicesConfig();
private SecurityConfig securityConfig = new SecurityConfig();
private final List<ListenerConfig> listenerConfigs = new LinkedList<ListenerConfig>();
private PartitionGroupConfig partitionGroupConfig = new PartitionGroupConfig();
private ManagementCenterConfig managementCenterConfig = new ManagementCenterConfig();
private SerializationConfig serializationConfig = new SerializationConfig();
private ManagedContext managedContext;
private ConcurrentMap<String, Object> userContext = new ConcurrentHashMap<String, Object>();
private MemberAttributeConfig memberAttributeConfig = new MemberAttributeConfig();
private String licenseKey;
public Config() {
}
public Config(String instanceName){
this.instanceName = instanceName;
}
/**
* Returns the class-loader that will be used in serialization.
* <p> If null, then thread context class-loader will be used instead.
*
* @return the class-loader
*/
public ClassLoader getClassLoader() {
return classLoader;
}
/**
* Sets the class-loader to be used during de-serialization
* and as context class-loader of Hazelcast internal threads.
*
* <p>
* If not set (or set to null); thread context class-loader
* will be used in required places.
*
* <p>
* Default value is null.
*
* @param classLoader class-loader to be used during de-serialization
* @return Config instance
*/
public Config setClassLoader(ClassLoader classLoader) {
this.classLoader = classLoader;
return this;
}
public String getProperty(String name) {
String value = properties.getProperty(name);
return value != null ? value : System.getProperty(name);
}
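// Illustrative lookup order ("hazelcast.logging.type" is a real property key;
// the value is an assumption):
//   config.setProperty("hazelcast.logging.type", "slf4j");
//   config.getProperty("hazelcast.logging.type"); // "slf4j"; falls back to
//                                                 // System.getProperty(...) when absent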
public Config setProperty(String name, String value) {
properties.put(name, value);
return this;
}
public MemberAttributeConfig getMemberAttributeConfig() {
return memberAttributeConfig;
}
public void setMemberAttributeConfig(MemberAttributeConfig memberAttributeConfig) {
this.memberAttributeConfig = memberAttributeConfig;
}
public Properties getProperties() {
return properties;
}
public Config setProperties(Properties properties) {
this.properties = properties;
return this;
}
public String getInstanceName() {
return instanceName;
}
public Config setInstanceName(String instanceName) {
this.instanceName = instanceName;
return this;
}
public GroupConfig getGroupConfig() {
return groupConfig;
}
public Config setGroupConfig(GroupConfig groupConfig) {
this.groupConfig = groupConfig;
return this;
}
public NetworkConfig getNetworkConfig() {
return networkConfig;
}
public Config setNetworkConfig(NetworkConfig networkConfig) {
this.networkConfig = networkConfig;
return this;
}
public MapConfig findMapConfig(String name){
name = getBaseName(name);
MapConfig config;
if ((config = lookupByPattern(mapConfigs, name)) != null) return config.getAsReadOnly();
return getMapConfig("default").getAsReadOnly();
}
public MapConfig getMapConfig(String name) {
name = getBaseName(name);
MapConfig config;
if ((config = lookupByPattern(mapConfigs, name)) != null) return config;
MapConfig defConfig = mapConfigs.get("default");
if (defConfig == null) {
defConfig = new MapConfig();
defConfig.setName("default");
addMapConfig(defConfig);
}
config = new MapConfig(defConfig);
config.setName(name);
addMapConfig(config);
return config;
}
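// Note on the two lookup styles above: findMapConfig returns a read-only view
// (creating only the "default" entry if needed), while getMapConfig lazily
// creates and registers a new named config derived from "default" when no
// pattern matches the name.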
public Config addMapConfig(MapConfig mapConfig) {
mapConfigs.put(mapConfig.getName(), mapConfig);
return this;
}
/**
* @return the mapConfigs
*/
public Map<String, MapConfig> getMapConfigs() {
return mapConfigs;
}
/**
* @param mapConfigs the mapConfigs to set
*/
public Config setMapConfigs(Map<String, MapConfig> mapConfigs) {
this.mapConfigs.clear();
this.mapConfigs.putAll(mapConfigs);
for (final Entry<String, MapConfig> entry : this.mapConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public QueueConfig findQueueConfig(String name){
name = getBaseName(name);
QueueConfig config;
if ((config = lookupByPattern(queueConfigs, name)) != null) return config.getAsReadOnly();
return getQueueConfig("default").getAsReadOnly();
}
public QueueConfig getQueueConfig(String name) {
name = getBaseName(name);
QueueConfig config;
if ((config = lookupByPattern(queueConfigs, name)) != null) return config;
QueueConfig defConfig = queueConfigs.get("default");
if (defConfig == null) {
defConfig = new QueueConfig();
defConfig.setName("default");
addQueueConfig(defConfig);
}
config = new QueueConfig(defConfig);
config.setName(name);
addQueueConfig(config);
return config;
}
public Config addQueueConfig(QueueConfig queueConfig) {
queueConfigs.put(queueConfig.getName(), queueConfig);
return this;
}
public Map<String, QueueConfig> getQueueConfigs() {
return queueConfigs;
}
public Config setQueueConfigs(Map<String, QueueConfig> queueConfigs) {
this.queueConfigs.clear();
this.queueConfigs.putAll(queueConfigs);
for (Entry<String, QueueConfig> entry : queueConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public ListConfig findListConfig(String name){
name = getBaseName(name);
ListConfig config;
if ((config = lookupByPattern(listConfigs, name)) != null) return config.getAsReadOnly();
return getListConfig("default").getAsReadOnly();
}
public ListConfig getListConfig(String name) {
name = getBaseName(name);
ListConfig config;
if ((config = lookupByPattern(listConfigs, name)) != null) return config;
ListConfig defConfig = listConfigs.get("default");
if (defConfig == null) {
defConfig = new ListConfig();
defConfig.setName("default");
addListConfig(defConfig);
}
config = new ListConfig(defConfig);
config.setName(name);
addListConfig(config);
return config;
}
public Config addListConfig(ListConfig listConfig) {
listConfigs.put(listConfig.getName(), listConfig);
return this;
}
public Map<String, ListConfig> getListConfigs() {
return listConfigs;
}
public Config setListConfigs(Map<String, ListConfig> listConfigs) {
this.listConfigs.clear();
this.listConfigs.putAll(listConfigs);
for (Entry<String, ListConfig> entry : listConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public SetConfig findSetConfig(String name){
name = getBaseName(name);
SetConfig config;
if ((config = lookupByPattern(setConfigs, name)) != null) return config.getAsReadOnly();
return getSetConfig("default").getAsReadOnly();
}
public SetConfig getSetConfig(String name) {
name = getBaseName(name);
SetConfig config;
if ((config = lookupByPattern(setConfigs, name)) != null) return config;
SetConfig defConfig = setConfigs.get("default");
if (defConfig == null) {
defConfig = new SetConfig();
defConfig.setName("default");
addSetConfig(defConfig);
}
config = new SetConfig(defConfig);
config.setName(name);
addSetConfig(config);
return config;
}
public Config addSetConfig(SetConfig setConfig) {
setConfigs.put(setConfig.getName(), setConfig);
return this;
}
public Map<String, SetConfig> getSetConfigs() {
return setConfigs;
}
public Config setSetConfigs(Map<String, SetConfig> setConfigs) {
this.setConfigs.clear();
this.setConfigs.putAll(setConfigs);
for (Entry<String, SetConfig> entry : setConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public MultiMapConfig findMultiMapConfig(String name){
name = getBaseName(name);
MultiMapConfig config;
if ((config = lookupByPattern(multiMapConfigs, name)) != null) return config.getAsReadOnly();
return getMultiMapConfig("default").getAsReadOnly();
}
public MultiMapConfig getMultiMapConfig(String name) {
name = getBaseName(name);
MultiMapConfig config;
if ((config = lookupByPattern(multiMapConfigs, name)) != null) return config;
MultiMapConfig defConfig = multiMapConfigs.get("default");
if (defConfig == null) {
defConfig = new MultiMapConfig();
defConfig.setName("default");
addMultiMapConfig(defConfig);
}
config = new MultiMapConfig(defConfig);
config.setName(name);
addMultiMapConfig(config);
return config;
}
public Config addMultiMapConfig(MultiMapConfig multiMapConfig) {
multiMapConfigs.put(multiMapConfig.getName(), multiMapConfig);
return this;
}
public Map<String, MultiMapConfig> getMultiMapConfigs() {
return multiMapConfigs;
}
public Config setMultiMapConfigs(Map<String, MultiMapConfig> multiMapConfigs) {
this.multiMapConfigs.clear();
this.multiMapConfigs.putAll(multiMapConfigs);
for (final Entry<String, MultiMapConfig> entry : this.multiMapConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public ReplicatedMapConfig findReplicatedMapConfig(String name){
ReplicatedMapConfig config;
if ((config = lookupByPattern(replicatedMapConfigs, name)) != null) return config.getAsReadOnly();
return getReplicatedMapConfig("default").getAsReadOnly();
}
public ReplicatedMapConfig getReplicatedMapConfig(String name) {
ReplicatedMapConfig config;
if ((config = lookupByPattern(replicatedMapConfigs, name)) != null) return config;
ReplicatedMapConfig defConfig = replicatedMapConfigs.get("default");
if (defConfig == null) {
defConfig = new ReplicatedMapConfig();
defConfig.setName("default");
addReplicatedMapConfig(defConfig);
}
config = new ReplicatedMapConfig(defConfig);
config.setName(name);
addReplicatedMapConfig(config);
return config;
}
public Config addReplicatedMapConfig(ReplicatedMapConfig replicatedMapConfig) {
replicatedMapConfigs.put(replicatedMapConfig.getName(), replicatedMapConfig);
return this;
}
public Map<String, ReplicatedMapConfig> getReplicatedMapConfigs() {
return replicatedMapConfigs;
}
public Config setReplicatedMapConfigs(Map<String, ReplicatedMapConfig> replicatedMapConfigs) {
this.replicatedMapConfigs.clear();
this.replicatedMapConfigs.putAll(replicatedMapConfigs);
for (final Entry<String, ReplicatedMapConfig> entry : this.replicatedMapConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public TopicConfig findTopicConfig(String name){
name = getBaseName(name);
TopicConfig config;
if ((config = lookupByPattern(topicConfigs, name)) != null) return config.getAsReadOnly();
return getTopicConfig("default").getAsReadOnly();
}
public TopicConfig getTopicConfig(String name) {
name = getBaseName(name);
TopicConfig config;
if ((config = lookupByPattern(topicConfigs, name)) != null) {
return config;
}
TopicConfig defConfig = topicConfigs.get("default");
if (defConfig == null) {
defConfig = new TopicConfig();
defConfig.setName("default");
addTopicConfig(defConfig);
}
config = new TopicConfig(defConfig);
config.setName(name);
addTopicConfig(config);
return config;
}
public Config addTopicConfig(TopicConfig topicConfig) {
topicConfigs.put(topicConfig.getName(), topicConfig);
return this;
}
/**
* @return the topicConfigs
*/
public Map<String, TopicConfig> getTopicConfigs() {
return topicConfigs;
}
/**
* @param mapTopicConfigs the topicConfigs to set
*/
public Config setTopicConfigs(Map<String, TopicConfig> mapTopicConfigs) {
this.topicConfigs.clear();
this.topicConfigs.putAll(mapTopicConfigs);
for (final Entry<String, TopicConfig> entry : this.topicConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public ExecutorConfig findExecutorConfig(String name){
name = getBaseName(name);
ExecutorConfig config;
if ((config = lookupByPattern(executorConfigs, name)) != null) return config.getAsReadOnly();
return getExecutorConfig("default").getAsReadOnly();
}
/**
* Returns the ExecutorConfig for the given name
*
* @param name name of the executor config
* @return ExecutorConfig
*/
public ExecutorConfig getExecutorConfig(String name) {
name = getBaseName(name);
ExecutorConfig config;
if ((config = lookupByPattern(executorConfigs, name)) != null) return config;
ExecutorConfig defConfig = executorConfigs.get("default");
if (defConfig == null) {
defConfig = new ExecutorConfig();
defConfig.setName("default");
addExecutorConfig(defConfig);
}
config = new ExecutorConfig(defConfig);
config.setName(name);
addExecutorConfig(config);
return config;
}
/**
* Adds a new ExecutorConfig by name
*
* @param executorConfig executor config to add
* @return this config instance
*/
public Config addExecutorConfig(ExecutorConfig executorConfig) {
this.executorConfigs.put(executorConfig.getName(), executorConfig);
return this;
}
public Map<String, ExecutorConfig> getExecutorConfigs() {
return executorConfigs;
}
public Config setExecutorConfigs(Map<String, ExecutorConfig> executorConfigs) {
this.executorConfigs.clear();
this.executorConfigs.putAll(executorConfigs);
for (Entry<String, ExecutorConfig> entry : executorConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public SemaphoreConfig findSemaphoreConfig(String name){
name = getBaseName(name);
SemaphoreConfig config;
if ((config = lookupByPattern(semaphoreConfigs, name)) != null) return config.getAsReadOnly();
return getSemaphoreConfig("default").getAsReadOnly();
}
/**
* Returns the SemaphoreConfig for the given name
*
* @param name name of the semaphore config
* @return SemaphoreConfig
*/
public SemaphoreConfig getSemaphoreConfig(String name) {
name = getBaseName(name);
SemaphoreConfig config;
if ((config = lookupByPattern(semaphoreConfigs, name)) != null) return config;
SemaphoreConfig defConfig = semaphoreConfigs.get("default");
if (defConfig == null) {
defConfig = new SemaphoreConfig();
defConfig.setName("default");
addSemaphoreConfig(defConfig);
}
config = new SemaphoreConfig(defConfig);
config.setName(name);
addSemaphoreConfig(config);
return config;
}
/**
* Adds a new SemaphoreConfig by name
*
* @param semaphoreConfig semaphore config to add
* @return this config instance
*/
public Config addSemaphoreConfig(SemaphoreConfig semaphoreConfig) {
this.semaphoreConfigs.put(semaphoreConfig.getName(), semaphoreConfig);
return this;
}
/**
* Returns the collection of semaphore configs.
*
* @return collection of semaphore configs.
*/
public Collection<SemaphoreConfig> getSemaphoreConfigs() {
return semaphoreConfigs.values();
}
public Config setSemaphoreConfigs(Map<String, SemaphoreConfig> semaphoreConfigs) {
this.semaphoreConfigs.clear();
this.semaphoreConfigs.putAll(semaphoreConfigs);
for (final Entry<String, SemaphoreConfig> entry : this.semaphoreConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public WanReplicationConfig getWanReplicationConfig(String name) {
return wanReplicationConfigs.get(name);
}
public Config addWanReplicationConfig(WanReplicationConfig wanReplicationConfig) {
wanReplicationConfigs.put(wanReplicationConfig.getName(), wanReplicationConfig);
return this;
}
public Map<String, WanReplicationConfig> getWanReplicationConfigs() {
return wanReplicationConfigs;
}
public Config setWanReplicationConfigs(Map<String, WanReplicationConfig> wanReplicationConfigs) {
this.wanReplicationConfigs.clear();
this.wanReplicationConfigs.putAll(wanReplicationConfigs);
return this;
}
public JobTrackerConfig findJobTrackerConfig(String name) {
name = getBaseName(name);
JobTrackerConfig config;
if ((config = lookupByPattern(jobTrackerConfigs, name)) != null) return config.getAsReadOnly();
return getJobTrackerConfig(name);
}
public JobTrackerConfig getJobTrackerConfig(String name) {
name = getBaseName(name);
JobTrackerConfig config;
if ((config = lookupByPattern(jobTrackerConfigs, name)) != null) return config;
JobTrackerConfig defConfig = jobTrackerConfigs.get("default");
if (defConfig == null) {
defConfig = new JobTrackerConfig();
defConfig.setName("default");
addJobTrackerConfig(defConfig);
}
config = new JobTrackerConfig(defConfig);
config.setName(name);
addJobTrackerConfig(config);
return config;
}
public Config addJobTrackerConfig(JobTrackerConfig jobTrackerConfig) {
jobTrackerConfigs.put(jobTrackerConfig.getName(), jobTrackerConfig);
return this;
}
public Map<String, JobTrackerConfig> getJobTrackerConfigs() {
return jobTrackerConfigs;
}
public Config setJobTrackerConfigs(Map<String, JobTrackerConfig> jobTrackerConfigs) {
this.jobTrackerConfigs.clear();
this.jobTrackerConfigs.putAll(jobTrackerConfigs);
for (final Entry<String, JobTrackerConfig> entry : this.jobTrackerConfigs.entrySet()) {
entry.getValue().setName(entry.getKey());
}
return this;
}
public ManagementCenterConfig getManagementCenterConfig() {
return managementCenterConfig;
}
public Config setManagementCenterConfig(ManagementCenterConfig managementCenterConfig) {
this.managementCenterConfig = managementCenterConfig;
return this;
}
public ServicesConfig getServicesConfig() {
return servicesConfig;
}
public Config setServicesConfig(ServicesConfig servicesConfig) {
this.servicesConfig = servicesConfig;
return this;
}
public SecurityConfig getSecurityConfig() {
return securityConfig;
}
public Config setSecurityConfig(SecurityConfig securityConfig) {
this.securityConfig = securityConfig;
return this;
}
public Config addListenerConfig(ListenerConfig listenerConfig) {
getListenerConfigs().add(listenerConfig);
return this;
}
public List<ListenerConfig> getListenerConfigs() {
return listenerConfigs;
}
public Config setListenerConfigs(List<ListenerConfig> listenerConfigs) {
this.listenerConfigs.clear();
this.listenerConfigs.addAll(listenerConfigs);
return this;
}
public SerializationConfig getSerializationConfig() {
return serializationConfig;
}
public Config setSerializationConfig(SerializationConfig serializationConfig) {
this.serializationConfig = serializationConfig;
return this;
}
public PartitionGroupConfig getPartitionGroupConfig() {
return partitionGroupConfig;
}
public Config setPartitionGroupConfig(PartitionGroupConfig partitionGroupConfig) {
this.partitionGroupConfig = partitionGroupConfig;
return this;
}
public ManagedContext getManagedContext() {
return managedContext;
}
public Config setManagedContext(final ManagedContext managedContext) {
this.managedContext = managedContext;
return this;
}
public ConcurrentMap<String, Object> getUserContext() {
return userContext;
}
public Config setUserContext(ConcurrentMap<String, Object> userContext) {
if(userContext == null){
throw new IllegalArgumentException("userContext can't be null");
}
this.userContext = userContext;
return this;
}
/**
* @return the configurationUrl
*/
public URL getConfigurationUrl() {
return configurationUrl;
}
/**
* @param configurationUrl the configurationUrl to set
*/
public Config setConfigurationUrl(URL configurationUrl) {
this.configurationUrl = configurationUrl;
return this;
}
/**
* @return the configurationFile
*/
public File getConfigurationFile() {
return configurationFile;
}
/**
* @param configurationFile the configurationFile to set
*/
public Config setConfigurationFile(File configurationFile) {
this.configurationFile = configurationFile;
return this;
}
public String getLicenseKey() {
return licenseKey;
}
public Config setLicenseKey(final String licenseKey) {
this.licenseKey = licenseKey;
return this;
}
private static <T> T lookupByPattern(Map<String, T> map, String name) {
T t = map.get(name);
if (t == null) {
for (Map.Entry<String,T> entry : map.entrySet()) {
String pattern = entry.getKey();
T value = entry.getValue();
if (nameMatches(name, pattern)) {
return value;
}
}
}
return t;
}
public static boolean nameMatches(final String name, final String pattern) {
final int index = pattern.indexOf('*');
if (index == -1) {
return name.equals(pattern);
} else {
final String firstPart = pattern.substring(0, index);
final int indexFirstPart = name.indexOf(firstPart, 0);
if (indexFirstPart == -1) {
return false;
}
final String secondPart = pattern.substring(index + 1);
final int indexSecondPart = name.indexOf(secondPart, index + 1);
return indexSecondPart != -1;
}
}
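// Illustrative wildcard semantics of nameMatches (a single '*' is supported):
//   nameMatches("com.example.myMap", "com.example.*") -> true
//   nameMatches("com.example.myMap", "*.myMap")       -> true
//   nameMatches("com.example.myMap", "com.other.*")   -> false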
/**
* @param config the config to compare against
* @return true if config is compatible with this one,
*         false if config belongs to another group
* @throws IllegalArgumentException if config is null
* @throws HazelcastException if group passwords differ or a map config is incompatible
*/
public boolean isCompatible(final Config config) {
if (config == null) {
throw new IllegalArgumentException("Expected not null config");
}
if (!this.groupConfig.getName().equals(config.getGroupConfig().getName())) {
return false;
}
if (!this.groupConfig.getPassword().equals(config.getGroupConfig().getPassword())) {
throw new HazelcastException("Incompatible group password");
}
checkMapConfigCompatible(config);
return true;
}
private void checkMapConfigCompatible(final Config config) {
Set<String> mapConfigNames = new HashSet<String>(mapConfigs.keySet());
mapConfigNames.addAll(config.mapConfigs.keySet());
for (final String name : mapConfigNames) {
final MapConfig thisMapConfig = lookupByPattern(mapConfigs, name);
final MapConfig thatMapConfig = lookupByPattern(config.mapConfigs, name);
if (thisMapConfig != null && thatMapConfig != null &&
!thisMapConfig.isCompatible(thatMapConfig)) {
throw new HazelcastException(format("Incompatible map config this:\n{0}\nanother:\n{1}",
thisMapConfig, thatMapConfig));
}
}
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append("Config");
sb.append("{groupConfig=").append(groupConfig);
sb.append(", properties=").append(properties);
sb.append(", networkConfig=").append(networkConfig);
sb.append(", mapConfigs=").append(mapConfigs);
sb.append(", topicConfigs=").append(topicConfigs);
sb.append(", queueConfigs=").append(queueConfigs);
sb.append(", multiMapConfigs=").append(multiMapConfigs);
sb.append(", executorConfigs=").append(executorConfigs);
sb.append(", semaphoreConfigs=").append(semaphoreConfigs);
sb.append(", wanReplicationConfigs=").append(wanReplicationConfigs);
sb.append(", listenerConfigs=").append(listenerConfigs);
sb.append(", partitionGroupConfig=").append(partitionGroupConfig);
sb.append(", managementCenterConfig=").append(managementCenterConfig);
sb.append(", securityConfig=").append(securityConfig);
sb.append('}');
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_config_Config.java
|
1,396 |
threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(new Runnable() {
@Override
public void run() {
try {
if (!mdLock.tryAcquire(request.masterNodeTimeout().nanos(), TimeUnit.NANOSECONDS)) {
listener.onFailure(new ProcessClusterEventTimeoutException(request.masterNodeTimeout(), "acquire index lock"));
return;
}
} catch (InterruptedException e) {
Thread.interrupted();
listener.onFailure(e);
return;
}
createIndex(request, listener, mdLock);
}
});
| 0true
|
src_main_java_org_elasticsearch_cluster_metadata_MetaDataCreateIndexService.java
|
19 |
static class ByteVertex extends Vertex {
private final LongObjectMap<ConcurrentSkipListSet<ByteEntry>> tx;
private final SortedSet<ByteEntry> set;
ByteVertex(long id, LongObjectMap<ConcurrentSkipListSet<ByteEntry>> tx) {
super(id);
this.tx = tx;
this.set = (SortedSet<ByteEntry>) tx.get(id);
}
@Override
public Iterable<Vertex> getNeighbors(final int value) {
// SortedSet<ByteEntry> set = (SortedSet<ByteEntry>) tx.get(id);
return Iterables.transform(Iterables.filter(set, new Predicate<ByteEntry>() {
@Override
public boolean apply(@Nullable ByteEntry entry) {
return !CHECK_VALUE || entry.value.getInt(0) == value;
}
}), new Function<ByteEntry, Vertex>() {
@Override
public Vertex apply(@Nullable ByteEntry entry) {
return new ByteVertex(entry.key.getLong(8), tx);
}
});
}
}
| 0true
|
titan-test_src_main_java_com_thinkaurelius_titan_TestByteBuffer.java
|
641 |
public class TransportIndicesStatusAction extends TransportBroadcastOperationAction<IndicesStatusRequest, IndicesStatusResponse, TransportIndicesStatusAction.IndexShardStatusRequest, ShardStatus> {
private final IndicesService indicesService;
private final RecoveryTarget peerRecoveryTarget;
@Inject
public TransportIndicesStatusAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, RecoveryTarget peerRecoveryTarget) {
super(settings, threadPool, clusterService, transportService);
this.peerRecoveryTarget = peerRecoveryTarget;
this.indicesService = indicesService;
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected String transportAction() {
return IndicesStatusAction.NAME;
}
@Override
protected IndicesStatusRequest newRequest() {
return new IndicesStatusRequest();
}
/**
* Status goes across *all* shards.
*/
@Override
protected GroupShardsIterator shards(ClusterState state, IndicesStatusRequest request, String[] concreteIndices) {
return state.routingTable().allAssignedShardsGrouped(concreteIndices, true);
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, IndicesStatusRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, IndicesStatusRequest countRequest, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices);
}
@Override
protected IndicesStatusResponse newResponse(IndicesStatusRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
int successfulShards = 0;
int failedShards = 0;
List<ShardOperationFailedException> shardFailures = null;
final List<ShardStatus> shards = newArrayList();
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
shards.add((ShardStatus) shardResponse);
successfulShards++;
}
}
return new IndicesStatusResponse(shards.toArray(new ShardStatus[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures);
}
@Override
protected IndexShardStatusRequest newShardRequest() {
return new IndexShardStatusRequest();
}
@Override
protected IndexShardStatusRequest newShardRequest(ShardRouting shard, IndicesStatusRequest request) {
return new IndexShardStatusRequest(shard.index(), shard.id(), request);
}
@Override
protected ShardStatus newShardResponse() {
return new ShardStatus();
}
@Override
protected ShardStatus shardOperation(IndexShardStatusRequest request) throws ElasticsearchException {
InternalIndexService indexService = (InternalIndexService) indicesService.indexServiceSafe(request.index());
InternalIndexShard indexShard = (InternalIndexShard) indexService.shardSafe(request.shardId());
ShardStatus shardStatus = new ShardStatus(indexShard.routingEntry());
shardStatus.state = indexShard.state();
try {
shardStatus.storeSize = indexShard.store().estimateSize();
} catch (IOException e) {
// failure to get the store size...
}
if (indexShard.state() == IndexShardState.STARTED) {
// shardStatus.estimatedFlushableMemorySize = indexShard.estimateFlushableMemorySize();
shardStatus.translogId = indexShard.translog().currentId();
shardStatus.translogOperations = indexShard.translog().estimatedNumberOfOperations();
Engine.Searcher searcher = indexShard.acquireSearcher("indices_status");
try {
shardStatus.docs = new DocsStatus();
shardStatus.docs.numDocs = searcher.reader().numDocs();
shardStatus.docs.maxDoc = searcher.reader().maxDoc();
shardStatus.docs.deletedDocs = searcher.reader().numDeletedDocs();
} finally {
searcher.release();
}
shardStatus.mergeStats = indexShard.mergeScheduler().stats();
shardStatus.refreshStats = indexShard.refreshStats();
shardStatus.flushStats = indexShard.flushStats();
}
if (request.recovery) {
// check on going recovery (from peer or gateway)
RecoveryStatus peerRecoveryStatus = indexShard.peerRecoveryStatus();
if (peerRecoveryStatus == null) {
peerRecoveryStatus = peerRecoveryTarget.peerRecoveryStatus(indexShard.shardId());
}
if (peerRecoveryStatus != null) {
PeerRecoveryStatus.Stage stage;
switch (peerRecoveryStatus.stage()) {
case INIT:
stage = PeerRecoveryStatus.Stage.INIT;
break;
case INDEX:
stage = PeerRecoveryStatus.Stage.INDEX;
break;
case TRANSLOG:
stage = PeerRecoveryStatus.Stage.TRANSLOG;
break;
case FINALIZE:
stage = PeerRecoveryStatus.Stage.FINALIZE;
break;
case DONE:
stage = PeerRecoveryStatus.Stage.DONE;
break;
default:
stage = PeerRecoveryStatus.Stage.INIT;
}
shardStatus.peerRecoveryStatus = new PeerRecoveryStatus(stage, peerRecoveryStatus.startTime(), peerRecoveryStatus.time(),
peerRecoveryStatus.phase1TotalSize(), peerRecoveryStatus.phase1ExistingTotalSize(),
peerRecoveryStatus.currentFilesSize(), peerRecoveryStatus.currentTranslogOperations());
}
IndexShardGatewayService gatewayService = indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
org.elasticsearch.index.gateway.RecoveryStatus gatewayRecoveryStatus = gatewayService.recoveryStatus();
if (gatewayRecoveryStatus != null) {
GatewayRecoveryStatus.Stage stage;
switch (gatewayRecoveryStatus.stage()) {
case INIT:
stage = GatewayRecoveryStatus.Stage.INIT;
break;
case INDEX:
stage = GatewayRecoveryStatus.Stage.INDEX;
break;
case TRANSLOG:
stage = GatewayRecoveryStatus.Stage.TRANSLOG;
break;
case DONE:
stage = GatewayRecoveryStatus.Stage.DONE;
break;
default:
stage = GatewayRecoveryStatus.Stage.INIT;
}
shardStatus.gatewayRecoveryStatus = new GatewayRecoveryStatus(stage, gatewayRecoveryStatus.startTime(), gatewayRecoveryStatus.time(),
gatewayRecoveryStatus.index().totalSize(), gatewayRecoveryStatus.index().reusedTotalSize(), gatewayRecoveryStatus.index().currentFilesSize(), gatewayRecoveryStatus.translog().currentTranslogOperations());
}
}
if (request.snapshot) {
IndexShardGatewayService gatewayService = indexService.shardInjector(request.shardId()).getInstance(IndexShardGatewayService.class);
SnapshotStatus snapshotStatus = gatewayService.snapshotStatus();
if (snapshotStatus != null) {
GatewaySnapshotStatus.Stage stage;
switch (snapshotStatus.stage()) {
case DONE:
stage = GatewaySnapshotStatus.Stage.DONE;
break;
case FAILURE:
stage = GatewaySnapshotStatus.Stage.FAILURE;
break;
case TRANSLOG:
stage = GatewaySnapshotStatus.Stage.TRANSLOG;
break;
case FINALIZE:
stage = GatewaySnapshotStatus.Stage.FINALIZE;
break;
case INDEX:
stage = GatewaySnapshotStatus.Stage.INDEX;
break;
default:
stage = GatewaySnapshotStatus.Stage.NONE;
break;
}
shardStatus.gatewaySnapshotStatus = new GatewaySnapshotStatus(stage, snapshotStatus.startTime(), snapshotStatus.time(),
snapshotStatus.index().totalSize(), snapshotStatus.translog().expectedNumberOfOperations());
}
}
return shardStatus;
}
public static class IndexShardStatusRequest extends BroadcastShardOperationRequest {
boolean recovery;
boolean snapshot;
IndexShardStatusRequest() {
}
IndexShardStatusRequest(String index, int shardId, IndicesStatusRequest request) {
super(index, shardId, request);
recovery = request.recovery();
snapshot = request.snapshot();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
recovery = in.readBoolean();
snapshot = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(recovery);
out.writeBoolean(snapshot);
}
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_indices_status_TransportIndicesStatusAction.java
|
1,458 |
public class DiscoveryNodes implements Iterable<DiscoveryNode> {
public static final DiscoveryNodes EMPTY_NODES = builder().build();
private final ImmutableOpenMap<String, DiscoveryNode> nodes;
private final ImmutableOpenMap<String, DiscoveryNode> dataNodes;
private final ImmutableOpenMap<String, DiscoveryNode> masterNodes;
private final String masterNodeId;
private final String localNodeId;
private DiscoveryNodes(ImmutableOpenMap<String, DiscoveryNode> nodes, ImmutableOpenMap<String, DiscoveryNode> dataNodes, ImmutableOpenMap<String, DiscoveryNode> masterNodes, String masterNodeId, String localNodeId) {
this.nodes = nodes;
this.dataNodes = dataNodes;
this.masterNodes = masterNodes;
this.masterNodeId = masterNodeId;
this.localNodeId = localNodeId;
}
@Override
public UnmodifiableIterator<DiscoveryNode> iterator() {
return nodes.valuesIt();
}
/**
* Is this a valid nodes that has the minimal information set. The minimal set is defined
* by the localNodeId being set.
*/
public boolean valid() {
return localNodeId != null;
}
/**
* Returns <tt>true</tt> if the local node is the master node.
*/
public boolean localNodeMaster() {
if (localNodeId == null) {
// we don't know yet the local node id, return false
return false;
}
return localNodeId.equals(masterNodeId);
}
/**
* Get the number of known nodes
*
* @return number of nodes
*/
public int size() {
return nodes.size();
}
/**
* Get the number of known nodes
*
* @return number of nodes
*/
public int getSize() {
return size();
}
/**
* Get a {@link Map} of the discovered nodes arranged by their ids
*
* @return {@link Map} of the discovered nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> nodes() {
return this.nodes;
}
/**
* Get a {@link Map} of the discovered nodes arranged by their ids
*
* @return {@link Map} of the discovered nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> getNodes() {
return nodes();
}
/**
* Get a {@link Map} of the discovered data nodes arranged by their ids
*
* @return {@link Map} of the discovered data nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> dataNodes() {
return this.dataNodes;
}
/**
* Get a {@link Map} of the discovered data nodes arranged by their ids
*
* @return {@link Map} of the discovered data nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> getDataNodes() {
return dataNodes();
}
/**
* Get a {@link Map} of the discovered master nodes arranged by their ids
*
* @return {@link Map} of the discovered master nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> masterNodes() {
return this.masterNodes;
}
/**
* Get a {@link Map} of the discovered master nodes arranged by their ids
*
* @return {@link Map} of the discovered master nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> getMasterNodes() {
return masterNodes();
}
/**
* Get a {@link Map} of the discovered master and data nodes arranged by their ids
*
* @return {@link Map} of the discovered master and data nodes arranged by their ids
*/
public ImmutableOpenMap<String, DiscoveryNode> masterAndDataNodes() {
ImmutableOpenMap.Builder<String, DiscoveryNode> nodes = ImmutableOpenMap.builder(dataNodes);
nodes.putAll(masterNodes);
return nodes.build();
}
/**
* Get a node by its id
*
* @param nodeId id of the wanted node
* @return wanted node if it exists. Otherwise <code>null</code>
*/
public DiscoveryNode get(String nodeId) {
return nodes.get(nodeId);
}
/**
* Determine if a given node exists
*
* @param nodeId id of the node which existence should be verified
* @return <code>true</code> if the node exists. Otherwise <code>false</code>
*/
public boolean nodeExists(String nodeId) {
return nodes.containsKey(nodeId);
}
/**
* Get the id of the master node
*
* @return id of the master
*/
public String masterNodeId() {
return this.masterNodeId;
}
/**
* Get the id of the master node
*
* @return id of the master
*/
public String getMasterNodeId() {
return masterNodeId();
}
/**
* Get the id of the local node
*
* @return id of the local node
*/
public String localNodeId() {
return this.localNodeId;
}
/**
* Get the id of the local node
*
* @return id of the local node
*/
public String getLocalNodeId() {
return localNodeId();
}
/**
* Get the local node
*
* @return local node
*/
public DiscoveryNode localNode() {
return nodes.get(localNodeId);
}
/**
* Get the local node
*
* @return local node
*/
public DiscoveryNode getLocalNode() {
return localNode();
}
/**
* Get the master node
*
* @return master node
*/
public DiscoveryNode masterNode() {
return nodes.get(masterNodeId);
}
/**
* Get the master node
*
* @return master node
*/
public DiscoveryNode getMasterNode() {
return masterNode();
}
/**
* Get a node by its address
*
* @param address {@link TransportAddress} of the wanted node
* @return node identified by the given address or <code>null</code> if no such node exists
*/
public DiscoveryNode findByAddress(TransportAddress address) {
for (ObjectCursor<DiscoveryNode> cursor : nodes.values()) {
DiscoveryNode node = cursor.value;
if (node.address().equals(address)) {
return node;
}
}
return null;
}
public boolean isAllNodes(String... nodesIds) {
return nodesIds == null || nodesIds.length == 0 || (nodesIds.length == 1 && nodesIds[0].equals("_all"));
}
/**
* Resolve a node with a given id
*
* @param node id of the node to discover
* @return discovered node matching the given id
* @throws org.elasticsearch.ElasticsearchIllegalArgumentException if more than one node matches the request or no nodes have been resolved
*/
public DiscoveryNode resolveNode(String node) {
String[] resolvedNodeIds = resolveNodesIds(node);
if (resolvedNodeIds.length > 1) {
throw new ElasticsearchIllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node");
}
if (resolvedNodeIds.length == 0) {
throw new ElasticsearchIllegalArgumentException("failed to resolve [" + node + " ], no matching nodes");
}
return nodes.get(resolvedNodeIds[0]);
}
public String[] resolveNodesIds(String... nodesIds) {
if (isAllNodes(nodesIds)) {
int index = 0;
nodesIds = new String[nodes.size()];
for (DiscoveryNode node : this) {
nodesIds[index++] = node.id();
}
return nodesIds;
} else {
ObjectOpenHashSet<String> resolvedNodesIds = new ObjectOpenHashSet<String>(nodesIds.length);
for (String nodeId : nodesIds) {
if (nodeId.equals("_local")) {
String localNodeId = localNodeId();
if (localNodeId != null) {
resolvedNodesIds.add(localNodeId);
}
} else if (nodeId.equals("_master")) {
String masterNodeId = masterNodeId();
if (masterNodeId != null) {
resolvedNodesIds.add(masterNodeId);
}
} else if (nodeExists(nodeId)) {
resolvedNodesIds.add(nodeId);
} else {
// not a node id, try and search by name
for (DiscoveryNode node : this) {
if (Regex.simpleMatch(nodeId, node.name())) {
resolvedNodesIds.add(node.id());
}
}
for (DiscoveryNode node : this) {
if (Regex.simpleMatch(nodeId, node.getHostAddress())) {
resolvedNodesIds.add(node.id());
} else if (Regex.simpleMatch(nodeId, node.getHostName())) {
resolvedNodesIds.add(node.id());
}
}
int index = nodeId.indexOf(':');
if (index != -1) {
String matchAttrName = nodeId.substring(0, index);
String matchAttrValue = nodeId.substring(index + 1);
if ("data".equals(matchAttrName)) {
if (Booleans.parseBoolean(matchAttrValue, true)) {
resolvedNodesIds.addAll(dataNodes.keys());
} else {
resolvedNodesIds.removeAll(dataNodes.keys());
}
} else if ("master".equals(matchAttrName)) {
if (Booleans.parseBoolean(matchAttrValue, true)) {
resolvedNodesIds.addAll(masterNodes.keys());
} else {
resolvedNodesIds.removeAll(masterNodes.keys());
}
} else {
for (DiscoveryNode node : this) {
for (Map.Entry<String, String> entry : node.attributes().entrySet()) {
String attrName = entry.getKey();
String attrValue = entry.getValue();
if (Regex.simpleMatch(matchAttrName, attrName) && Regex.simpleMatch(matchAttrValue, attrValue)) {
resolvedNodesIds.add(node.id());
}
}
}
}
}
}
}
return resolvedNodesIds.toArray(String.class);
}
}
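// Illustrative selectors handled above (the attribute name and value are
// assumptions):
//   resolveNodesIds("_local")       -> the local node id
//   resolveNodesIds("_master")      -> the elected master's id
//   resolveNodesIds("data:true")    -> all data nodes
//   resolveNodesIds("rack:rack-1*") -> nodes whose "rack" attribute matches "rack-1*"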
public DiscoveryNodes removeDeadMembers(Set<String> newNodes, String masterNodeId) {
Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
for (DiscoveryNode node : this) {
if (newNodes.contains(node.id())) {
builder.put(node);
}
}
return builder.build();
}
public DiscoveryNodes newNode(DiscoveryNode node) {
return new Builder(this).put(node).build();
}
/**
* Returns the changes comparing this nodes to the provided nodes.
*/
public Delta delta(DiscoveryNodes other) {
List<DiscoveryNode> removed = newArrayList();
List<DiscoveryNode> added = newArrayList();
for (DiscoveryNode node : other) {
if (!this.nodeExists(node.id())) {
removed.add(node);
}
}
for (DiscoveryNode node : this) {
if (!other.nodeExists(node.id())) {
added.add(node);
}
}
DiscoveryNode previousMasterNode = null;
DiscoveryNode newMasterNode = null;
if (masterNodeId != null) {
if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
previousMasterNode = other.masterNode();
newMasterNode = masterNode();
}
}
return new Delta(previousMasterNode, newMasterNode, localNodeId, ImmutableList.copyOf(removed), ImmutableList.copyOf(added));
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
for (DiscoveryNode node : this) {
sb.append(node).append(',');
}
sb.append("}");
return sb.toString();
}
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append("nodes: \n");
for (DiscoveryNode node : this) {
sb.append(" ").append(node);
if (node == localNode()) {
sb.append(", local");
}
if (node == masterNode()) {
sb.append(", master");
}
sb.append("\n");
}
return sb.toString();
}
public Delta emptyDelta() {
return new Delta(null, null, localNodeId, DiscoveryNode.EMPTY_LIST, DiscoveryNode.EMPTY_LIST);
}
public static class Delta {
private final String localNodeId;
private final DiscoveryNode previousMasterNode;
private final DiscoveryNode newMasterNode;
private final ImmutableList<DiscoveryNode> removed;
private final ImmutableList<DiscoveryNode> added;
public Delta(String localNodeId, ImmutableList<DiscoveryNode> removed, ImmutableList<DiscoveryNode> added) {
this(null, null, localNodeId, removed, added);
}
public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, ImmutableList<DiscoveryNode> removed, ImmutableList<DiscoveryNode> added) {
this.previousMasterNode = previousMasterNode;
this.newMasterNode = newMasterNode;
this.localNodeId = localNodeId;
this.removed = removed;
this.added = added;
}
public boolean hasChanges() {
return masterNodeChanged() || !removed.isEmpty() || !added.isEmpty();
}
public boolean masterNodeChanged() {
return newMasterNode != null;
}
public DiscoveryNode previousMasterNode() {
return previousMasterNode;
}
public DiscoveryNode newMasterNode() {
return newMasterNode;
}
public boolean removed() {
return !removed.isEmpty();
}
public ImmutableList<DiscoveryNode> removedNodes() {
return removed;
}
public boolean added() {
return !added.isEmpty();
}
public ImmutableList<DiscoveryNode> addedNodes() {
return added;
}
public String shortSummary() {
StringBuilder sb = new StringBuilder();
if (!removed() && masterNodeChanged()) {
if (newMasterNode.id().equals(localNodeId)) {
// we are the master and no nodes were removed, so we are actually the first master
sb.append("new_master ").append(newMasterNode());
} else {
// we are not the master, so we just got this event. No nodes were removed, so it's not a *new* master
sb.append("detected_master ").append(newMasterNode());
}
} else {
if (masterNodeChanged()) {
sb.append("master {new ").append(newMasterNode());
if (previousMasterNode() != null) {
sb.append(", previous ").append(previousMasterNode());
}
sb.append("}");
}
if (removed()) {
if (masterNodeChanged()) {
sb.append(", ");
}
sb.append("removed {");
for (DiscoveryNode node : removedNodes()) {
sb.append(node).append(',');
}
sb.append("}");
}
}
if (added()) {
// don't print if there is one added, and it is us
if (!(addedNodes().size() == 1 && addedNodes().get(0).id().equals(localNodeId))) {
if (removed() || masterNodeChanged()) {
sb.append(", ");
}
sb.append("added {");
for (DiscoveryNode node : addedNodes()) {
if (!node.id().equals(localNodeId)) {
// don't print ourself
sb.append(node).append(',');
}
}
sb.append("}");
}
}
return sb.toString();
}
}
public static Builder builder() {
return new Builder();
}
public static Builder builder(DiscoveryNodes nodes) {
return new Builder(nodes);
}
public static class Builder {
private final ImmutableOpenMap.Builder<String, DiscoveryNode> nodes;
private String masterNodeId;
private String localNodeId;
public Builder() {
nodes = ImmutableOpenMap.builder();
}
public Builder(DiscoveryNodes nodes) {
this.masterNodeId = nodes.masterNodeId();
this.localNodeId = nodes.localNodeId();
this.nodes = ImmutableOpenMap.builder(nodes.nodes());
}
public Builder put(DiscoveryNode node) {
nodes.put(node.id(), node);
return this;
}
public Builder remove(String nodeId) {
nodes.remove(nodeId);
return this;
}
public Builder masterNodeId(String masterNodeId) {
this.masterNodeId = masterNodeId;
return this;
}
public Builder localNodeId(String localNodeId) {
this.localNodeId = localNodeId;
return this;
}
public DiscoveryNodes build() {
ImmutableOpenMap.Builder<String, DiscoveryNode> dataNodesBuilder = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, DiscoveryNode> masterNodesBuilder = ImmutableOpenMap.builder();
for (ObjectObjectCursor<String, DiscoveryNode> nodeEntry : nodes) {
if (nodeEntry.value.dataNode()) {
dataNodesBuilder.put(nodeEntry.key, nodeEntry.value);
}
if (nodeEntry.value.masterNode()) {
masterNodesBuilder.put(nodeEntry.key, nodeEntry.value);
}
}
return new DiscoveryNodes(nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), masterNodeId, localNodeId);
}
public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException {
if (nodes.masterNodeId() == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeString(nodes.masterNodeId);
}
out.writeVInt(nodes.size());
for (DiscoveryNode node : nodes) {
node.writeTo(out);
}
}
public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
Builder builder = new Builder();
if (in.readBoolean()) {
builder.masterNodeId(in.readString());
}
if (localNode != null) {
builder.localNodeId(localNode.id());
}
int size = in.readVInt();
for (int i = 0; i < size; i++) {
DiscoveryNode node = DiscoveryNode.readNode(in);
if (localNode != null && node.id().equals(localNode.id())) {
// reuse the same instance of our address and local node id for faster equality
node = localNode;
}
builder.put(node);
}
return builder.build();
}
}
}
| 1no label
|
src_main_java_org_elasticsearch_cluster_node_DiscoveryNodes.java
|
274 |
public final class ExceptionsHelper {
private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
public static RuntimeException convertToRuntime(Throwable t) {
if (t instanceof RuntimeException) {
return (RuntimeException) t;
}
return new ElasticsearchException(t.getMessage(), t);
}
public static ElasticsearchException convertToElastic(Throwable t) {
if (t instanceof ElasticsearchException) {
return (ElasticsearchException) t;
}
return new ElasticsearchException(t.getMessage(), t);
}
public static RestStatus status(Throwable t) {
if (t instanceof ElasticsearchException) {
return ((ElasticsearchException) t).status();
}
return RestStatus.INTERNAL_SERVER_ERROR;
}
public static Throwable unwrapCause(Throwable t) {
int counter = 0;
Throwable result = t;
while (result instanceof ElasticsearchWrapperException) {
if (result.getCause() == null) {
return result;
}
if (result.getCause() == result) {
return result;
}
if (counter++ > 10) {
// dear god, if we got more than 10 levels down, WTF? just bail
logger.warn("Exception cause unwrapping ran for 10 levels...", t);
return result;
}
result = result.getCause();
}
return result;
}
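// Illustrative behavior (SomeWrapper is a hypothetical class implementing
// ElasticsearchWrapperException): for w = new SomeWrapper(new IOException("io")),
// unwrapCause(w) returns the IOException; unwrapping bails out after 10 levels.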
public static String detailedMessage(Throwable t) {
return detailedMessage(t, false, 0);
}
public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) {
if (t == null) {
return "Unknown";
}
int counter = initialCounter + 1;
if (t.getCause() != null) {
StringBuilder sb = new StringBuilder();
while (t != null) {
sb.append(t.getClass().getSimpleName());
if (t.getMessage() != null) {
sb.append("[");
sb.append(t.getMessage());
sb.append("]");
}
if (!newLines) {
sb.append("; ");
}
t = t.getCause();
if (t != null) {
if (newLines) {
sb.append("\n");
for (int i = 0; i < counter; i++) {
sb.append("\t");
}
} else {
sb.append("nested: ");
}
}
counter++;
}
return sb.toString();
} else {
return t.getClass().getSimpleName() + "[" + t.getMessage() + "]";
}
}
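// Illustrative output (assumed inputs) for
// detailedMessage(new RuntimeException("outer", new IOException("inner"))):
//   "RuntimeException[outer]; nested: IOException[inner]; "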
}
| 1no label
|
src_main_java_org_elasticsearch_ExceptionsHelper.java
|
374 |
public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryRequest> {
private String name;
private String type;
private Settings settings = EMPTY_SETTINGS;
PutRepositoryRequest() {
}
/**
* Constructs a new put repository request with the provided name.
*/
public PutRepositoryRequest(String name) {
this.name = name;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (name == null) {
validationException = addValidationError("name is missing", validationException);
}
if (type == null) {
validationException = addValidationError("type is missing", validationException);
}
return validationException;
}
/**
* Sets the name of the repository.
*
* @param name repository name
*/
public PutRepositoryRequest name(String name) {
this.name = name;
return this;
}
/**
* The name of the repository.
*
* @return repository name
*/
public String name() {
return this.name;
}
/**
* The type of the repository
* <p/>
* <ul>
* <li>"fs" - shared filesystem repository</li>
* </ul>
*
* @param type repository type
* @return this request
*/
public PutRepositoryRequest type(String type) {
this.type = type;
return this;
}
/**
* Returns repository type
*
* @return repository type
*/
public String type() {
return this.type;
}
/**
* Sets the repository settings
*
* @param settings repository settings
* @return this request
*/
public PutRepositoryRequest settings(Settings settings) {
this.settings = settings;
return this;
}
/**
* Sets the repository settings
*
* @param settings repository settings
* @return this request
*/
public PutRepositoryRequest settings(Settings.Builder settings) {
this.settings = settings.build();
return this;
}
/**
* Sets the repository settings.
*
* @param source repository settings in json, yaml or properties format
* @return this request
*/
public PutRepositoryRequest settings(String source) {
this.settings = ImmutableSettings.settingsBuilder().loadFromSource(source).build();
return this;
}
/**
* Sets the repository settings.
*
* @param source repository settings
* @return this request
*/
public PutRepositoryRequest settings(Map<String, Object> source) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(source);
settings(builder.string());
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
return this;
}
/**
* Returns repository settings
*
* @return repository settings
*/
public Settings settings() {
return this.settings;
}
/**
* Parses repository definition.
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(XContentBuilder repositoryDefinition) {
return source(repositoryDefinition.bytes());
}
/**
* Parses repository definition.
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(Map repositoryDefinition) {
Map<String, Object> source = repositoryDefinition;
for (Map.Entry<String, Object> entry : source.entrySet()) {
String name = entry.getKey();
if (name.equals("type")) {
type(entry.getValue().toString());
} else if (name.equals("settings")) {
if (!(entry.getValue() instanceof Map)) {
throw new ElasticsearchIllegalArgumentException("Malformed settings section, should include an inner object");
}
settings((Map<String, Object>) entry.getValue());
}
}
return this;
}
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(String repositoryDefinition) {
try {
return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source [" + repositoryDefinition + "]", e);
}
}
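// Illustrative JSON accepted above ("location" is a setting of the "fs"
// repository type; the path is an assumption):
//   request.source("{\"type\": \"fs\", \"settings\": {\"location\": \"/mount/backups\"}}");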
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(byte[] repositoryDefinition) {
return source(repositoryDefinition, 0, repositoryDefinition.length);
}
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(byte[] repositoryDefinition, int offset, int length) {
try {
return source(XContentFactory.xContent(repositoryDefinition, offset, length).createParser(repositoryDefinition, offset, length).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse repository source", e);
}
}
/**
* Parses repository definition.
* JSON, Smile and YAML formats are supported
*
* @param repositoryDefinition repository definition
*/
public PutRepositoryRequest source(BytesReference repositoryDefinition) {
try {
return source(XContentFactory.xContent(repositoryDefinition).createParser(repositoryDefinition).mapOrderedAndClose());
} catch (IOException e) {
throw new ElasticsearchIllegalArgumentException("failed to parse template source", e);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
name = in.readString();
type = in.readString();
settings = readSettingsFromStream(in);
readTimeout(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
out.writeString(type);
writeSettingsToStream(settings, out);
writeTimeout(out);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_repositories_put_PutRepositoryRequest.java
|
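A minimal hedged sketch of driving the source(Map) overload above. The "fs" repository type, the location path, and the name-taking constructor are illustrative assumptions, not shown in this snippet:

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;

public class PutRepositorySketch {
    static PutRepositoryRequest fsRepository() {
        Map<String, Object> settings = new HashMap<String, Object>();
        settings.put("location", "/mount/backups/my_backup"); // illustrative path
        Map<String, Object> definition = new HashMap<String, Object>();
        definition.put("type", "fs");         // dispatched to type(...) by source(Map)
        definition.put("settings", settings); // must be an inner object, per the check above
        return new PutRepositoryRequest("my_backup").source(definition); // name ctor assumed
    }
}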
180 |
public interface IDAuthority {
/**
* Returns a block of new ids in the form of {@link IDBlock}. It is guaranteed that
* the block of ids for the particular partition id is uniquely assigned,
* that is, the block of ids has not been previously and will not
* subsequently be assigned again when invoking this method on the local or
* any remote machine that is connected to the underlying storage backend.
* <p/>
* In other words, this method has to ensure that ids are uniquely assigned
* per partition.
* <p/>
* It is furthermore guaranteed that any id of the returned IDBlock is smaller than the upper bound
* for the given partition as read from the {@link IDBlockSizer} set on this IDAuthority and that the
* number of ids returned is equal to the block size of the IDBlockSizer.
*
* @param partition
* Partition for which to request an id block
* @param idNamespace namespace for ids within a partition
* @param timeout
* When a call to this method is unable to return an id block
* before this timeout elapses, the implementation must give up
* and throw a {@code BackendException} ASAP
* @return a range of ids for the {@code partition} parameter
*/
public IDBlock getIDBlock(int partition, int idNamespace, Duration timeout)
throws BackendException;
/**
* Returns the key ranges of the id space assigned to this local machine.
*
* @return the list of {@link KeyRange}s assigned to this machine
* @throws BackendException if the assignment cannot be read from the storage backend
*/
public List<KeyRange> getLocalIDPartition() throws BackendException;
/**
* Sets the {@link IDBlockSizer} to be used by this IDAuthority. The IDBlockSizer specifies the block size for
* each partition guaranteeing that the same partition will always be assigned the same block size.
* <p/>
* The IDBlockSizer cannot be changed for an IDAuthority that has already been used (i.e. after invoking {@link #getIDBlock(int, int, Duration)}).
*
* @param sizer The IDBlockSizer to be used by this IDAuthority
*/
public void setIDBlockSizer(IDBlockSizer sizer);
/**
* Closes the IDAuthority and any underlying storage backend.
*
* @throws BackendException
*/
public void close() throws BackendException;
/**
* Return the globally unique string used by this {@code IDAuthority}
* instance to recognize its ID allocations and distinguish its allocations
* from those belonging to other {@code IDAuthority} instances.
*
* This should normally be the value of
* {@link GraphDatabaseConfiguration#UNIQUE_INSTANCE_ID}, though that's not
* strictly technically necessary.
*
* @return unique ID string
*/
public String getUniqueID();
}
| 0true
|
titan-core_src_main_java_com_thinkaurelius_titan_diskstorage_IDAuthority.java
|
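A hedged sketch of the allocation contract documented above; 'authority' and 'timeout' are assumed to be wired up elsewhere, and IDBlock is assumed to expose numIds()/getId(index) accessors:

void consumeBlock(IDAuthority authority, Duration timeout) throws BackendException {
    int partition = 7;   // illustrative partition id
    int idNamespace = 0; // illustrative id namespace within that partition
    IDBlock block = authority.getIDBlock(partition, idNamespace, timeout);
    for (long i = 0; i < block.numIds(); i++) {
        long id = block.getId(i); // by contract, unique across all machines on this backend
        // ... hand 'id' out to whatever needs a fresh identifier
    }
}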
10 |
private static class StreamLogger extends Thread {
private final BufferedReader reader;
private static final Logger log =
LoggerFactory.getLogger(StreamLogger.class);
private StreamLogger(InputStream is) {
this.reader = new BufferedReader(new InputStreamReader(is));
}
@Override
public void run() {
String line;
try {
while (null != (line = reader.readLine())) {
log.info("> {}", line);
if (Thread.currentThread().isInterrupted()) {
break;
}
}
log.info("End of stream.");
} catch (IOException e) {
log.error("Unexpected IOException while reading stream {}", reader, e);
}
}
}
| 0true
|
titan-hbase-parent_titan-hbase-core_src_test_java_com_thinkaurelius_titan_HBaseStorageSetup.java
|
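Because the constructor is private, the logger above is only reachable from its enclosing test-setup class; a hedged sketch of the intended use there (the HBase command line is illustrative):

static void logChildOutput() throws IOException {
    Process p = new ProcessBuilder("hbase-daemon.sh", "start", "master")
            .redirectErrorStream(true) // fold stderr into stdout so one logger suffices
            .start();
    new StreamLogger(p.getInputStream()).start(); // drain the child's output into the log
}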
1,080 |
public static enum Operation {
UPSERT,
INDEX,
DELETE,
NONE
}
| 0true
|
src_main_java_org_elasticsearch_action_update_UpdateHelper.java
|
4,943 |
public class RestSearchAction extends BaseRestHandler {
@Inject
public RestSearchAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(GET, "/_search", this);
controller.registerHandler(POST, "/_search", this);
controller.registerHandler(GET, "/{index}/_search", this);
controller.registerHandler(POST, "/{index}/_search", this);
controller.registerHandler(GET, "/{index}/{type}/_search", this);
controller.registerHandler(POST, "/{index}/{type}/_search", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel) {
SearchRequest searchRequest;
try {
searchRequest = RestSearchAction.parseSearchRequest(request);
searchRequest.listenerThreaded(false);
SearchOperationThreading operationThreading = SearchOperationThreading.fromString(request.param("operation_threading"), null);
if (operationThreading != null) {
if (operationThreading == SearchOperationThreading.NO_THREADS) {
// since we don't spawn, don't allow no_threads, but change it to a single thread
operationThreading = SearchOperationThreading.SINGLE_THREAD;
}
searchRequest.operationThreading(operationThreading);
}
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("failed to parse search request parameters", e);
}
try {
XContentBuilder builder = restContentBuilder(request);
channel.sendResponse(new XContentRestResponse(request, BAD_REQUEST, builder.startObject().field("error", e.getMessage()).endObject()));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
return;
}
client.search(searchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse response) {
try {
XContentBuilder builder = restContentBuilder(request);
builder.startObject();
response.toXContent(builder, request);
builder.endObject();
channel.sendResponse(new XContentRestResponse(request, response.status(), builder));
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("failed to execute search (building response)", e);
}
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
public static SearchRequest parseSearchRequest(RestRequest request) {
String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
SearchRequest searchRequest = new SearchRequest(indices);
// get the content, and put it in the body
if (request.hasContent()) {
searchRequest.source(request.content(), request.contentUnsafe());
} else {
String source = request.param("source");
if (source != null) {
searchRequest.source(source);
}
}
// add extra source based on the request parameters
searchRequest.extraSource(parseSearchSource(request));
searchRequest.searchType(request.param("search_type"));
String scroll = request.param("scroll");
if (scroll != null) {
searchRequest.scroll(new Scroll(parseTimeValue(scroll, null)));
}
searchRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
searchRequest.routing(request.param("routing"));
searchRequest.preference(request.param("preference"));
searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
return searchRequest;
}
public static SearchSourceBuilder parseSearchSource(RestRequest request) {
SearchSourceBuilder searchSourceBuilder = null;
String queryString = request.param("q");
if (queryString != null) {
QueryStringQueryBuilder queryBuilder = QueryBuilders.queryString(queryString);
queryBuilder.defaultField(request.param("df"));
queryBuilder.analyzer(request.param("analyzer"));
queryBuilder.analyzeWildcard(request.paramAsBoolean("analyze_wildcard", false));
queryBuilder.lowercaseExpandedTerms(request.paramAsBoolean("lowercase_expanded_terms", true));
queryBuilder.lenient(request.paramAsBoolean("lenient", null));
String defaultOperator = request.param("default_operator");
if (defaultOperator != null) {
if ("OR".equals(defaultOperator)) {
queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.OR);
} else if ("AND".equals(defaultOperator)) {
queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND);
} else {
throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]");
}
}
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.query(queryBuilder);
}
int from = request.paramAsInt("from", -1);
if (from != -1) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.from(from);
}
int size = request.paramAsInt("size", -1);
if (size != -1) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.size(size);
}
if (request.hasParam("explain")) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.explain(request.paramAsBoolean("explain", null));
}
if (request.hasParam("version")) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.version(request.paramAsBoolean("version", null));
}
if (request.hasParam("timeout")) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.timeout(request.paramAsTime("timeout", null));
}
String sField = request.param("fields");
if (sField != null) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
if (!Strings.hasText(sField)) {
searchSourceBuilder.noFields();
} else {
String[] sFields = Strings.splitStringByCommaToArray(sField);
if (sFields != null) {
for (String field : sFields) {
searchSourceBuilder.field(field);
}
}
}
}
FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
if (fetchSourceContext != null) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.fetchSource(fetchSourceContext);
}
if (request.hasParam("track_scores")) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.trackScores(request.paramAsBoolean("track_scores", false));
}
String sSorts = request.param("sort");
if (sSorts != null) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
String[] sorts = Strings.splitStringByCommaToArray(sSorts);
for (String sort : sorts) {
int delimiter = sort.lastIndexOf(":");
if (delimiter != -1) {
String sortField = sort.substring(0, delimiter);
String reverse = sort.substring(delimiter + 1);
if ("asc".equals(reverse)) {
searchSourceBuilder.sort(sortField, SortOrder.ASC);
} else if ("desc".equals(reverse)) {
searchSourceBuilder.sort(sortField, SortOrder.DESC);
}
} else {
searchSourceBuilder.sort(sort);
}
}
}
String sIndicesBoost = request.param("indices_boost");
if (sIndicesBoost != null) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
String[] indicesBoost = Strings.splitStringByCommaToArray(sIndicesBoost);
for (String indexBoost : indicesBoost) {
int divisor = indexBoost.indexOf(',');
if (divisor == -1) {
throw new ElasticsearchIllegalArgumentException("Illegal index boost [" + indexBoost + "], no ','");
}
String indexName = indexBoost.substring(0, divisor);
String sBoost = indexBoost.substring(divisor + 1);
try {
searchSourceBuilder.indexBoost(indexName, Float.parseFloat(sBoost));
} catch (NumberFormatException e) {
throw new ElasticsearchIllegalArgumentException("Illegal index boost [" + indexBoost + "], boost not a float number");
}
}
}
String sStats = request.param("stats");
if (sStats != null) {
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
searchSourceBuilder.stats(Strings.splitStringByCommaToArray(sStats));
}
String suggestField = request.param("suggest_field");
if (suggestField != null) {
String suggestText = request.param("suggest_text", queryString);
int suggestSize = request.paramAsInt("suggest_size", 5);
if (searchSourceBuilder == null) {
searchSourceBuilder = new SearchSourceBuilder();
}
String suggestMode = request.param("suggest_mode");
searchSourceBuilder.suggest().addSuggestion(
termSuggestion(suggestField).field(suggestField).text(suggestText).size(suggestSize)
.suggestMode(suggestMode)
);
}
return searchSourceBuilder;
}
}
| 1no label
|
src_main_java_org_elasticsearch_rest_action_search_RestSearchAction.java
|
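For orientation, parseSearchSource above translates URI parameters into builder calls; a hedged sketch of the builder equivalent of a request such as GET /_search?q=user:kimchy&size=5&sort=age:desc&track_scores=true, assuming the usual fluent returns:

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;

class UriSearchSketch {
    static SearchSourceBuilder equivalent() {
        return new SearchSourceBuilder()
                .query(QueryBuilders.queryString("user:kimchy")
                        .analyzeWildcard(false)        // parser default for q
                        .lowercaseExpandedTerms(true)) // parser default for q
                .size(5)
                .sort("age", SortOrder.DESC)           // "age:desc" splits on the last ':'
                .trackScores(true);
    }
}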
415 |
private static final class MultiExecutionCallbackWrapper implements MultiExecutionCallback {
private final AtomicInteger members;
private final MultiExecutionCallback multiExecutionCallback;
private final Map<Member, Object> values;
private MultiExecutionCallbackWrapper(int memberSize, MultiExecutionCallback multiExecutionCallback) {
this.multiExecutionCallback = multiExecutionCallback;
this.members = new AtomicInteger(memberSize);
values = new HashMap<Member, Object>(memberSize);
}
public void onResponse(Member member, Object value) {
multiExecutionCallback.onResponse(member, value);
values.put(member, value);
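// count down the members still outstanding; the last response to arrive
// (counter reaches zero) flushes the collected values through onComplete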
int waitingResponse = members.decrementAndGet();
if (waitingResponse == 0) {
onComplete(values);
}
}
public void onComplete(Map<Member, Object> values) {
multiExecutionCallback.onComplete(values);
}
}
| 0true
|
hazelcast-client_src_main_java_com_hazelcast_client_proxy_ClientExecutorServiceProxy.java
|
517 |
MessageListener listener = new MessageListener() {
public void onMessage(Message message) {
latch.countDown();
}
};
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_topic_ClientTopicTest.java
|
693 |
constructors[COLLECTION_SIZE] = new ConstructorFunction<Integer, Portable>() {
public Portable createNew(Integer arg) {
return new CollectionSizeRequest();
}
};
| 0true
|
hazelcast_src_main_java_com_hazelcast_collection_CollectionPortableHook.java
|
572 |
@RunWith(HazelcastParallelClassRunner.class)
@Category(QuickTest.class)
public class ClusterMembershipTest extends HazelcastTestSupport {
@Test
public void testMembershipListener() {
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
HazelcastInstance hz1 = factory.newHazelcastInstance();
MembershipListenerImpl listener = new MembershipListenerImpl();
hz1.getCluster().addMembershipListener(listener);
//start a second instance
HazelcastInstance hz2 = factory.newHazelcastInstance();
assertEventuallySizeAtLeast(listener.events, 1);
assertMembershipAddedEvent(listener.events.get(0), hz2.getCluster().getLocalMember(), hz1.getCluster().getLocalMember(), hz2.getCluster().getLocalMember());
//terminate the second instance
Member member2 = hz2.getCluster().getLocalMember();
hz2.shutdown();
assertEventuallySizeAtLeast(listener.events, 2);
assertMembershipRemovedEvent(listener.events.get(1), member2, hz1.getCluster().getLocalMember());
}
@Test
public void testMembershipListenerSequentialInvocation() throws InterruptedException {
final Config config = new Config();
final int nodeCount = 10;
final TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(nodeCount);
final CountDownLatch eventLatch = new CountDownLatch(nodeCount - 1);
final CountDownLatch nodeLatch = new CountDownLatch(nodeCount);
config.addListenerConfig(new ListenerConfig().setImplementation(new MembershipListener() {
final AtomicBoolean flag = new AtomicBoolean(false);
public void memberAdded(MembershipEvent membershipEvent) {
if (flag.compareAndSet(false, true)) {
try {
Thread.sleep((long) (Math.random() * 500) + 50);
eventLatch.countDown();
} catch (InterruptedException e) {
e.printStackTrace();
} finally {
flag.set(false);
}
}
}
public void memberRemoved(MembershipEvent membershipEvent) {
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
}));
final ExecutorService ex = Executors.newFixedThreadPool(nodeCount / 2);
for (int i = 0; i < nodeCount; i++) {
ex.execute(new Runnable() {
public void run() {
factory.newHazelcastInstance(config);
nodeLatch.countDown();
}
});
}
try {
assertTrue(nodeLatch.await(30, TimeUnit.SECONDS));
assertTrue(eventLatch.await(30, TimeUnit.SECONDS));
} finally {
ex.shutdownNow();
}
}
@Test
public void testInitialMembershipListener() {
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
HazelcastInstance hz1 = factory.newHazelcastInstance();
InitialMembershipListenerImpl listener = new InitialMembershipListenerImpl();
hz1.getCluster().addMembershipListener(listener);
assertEventuallySizeAtLeast(listener.events, 1);
assertInitialMembershipEvent(listener.events.get(0), hz1.getCluster().getLocalMember());
HazelcastInstance hz2 = factory.newHazelcastInstance();
assertEventuallySizeAtLeast(listener.events, 2);
assertMembershipAddedEvent(listener.events.get(1), hz2.getCluster().getLocalMember(), hz1.getCluster().getLocalMember(), hz2.getCluster().getLocalMember());
Member member2 = hz2.getCluster().getLocalMember();
hz2.shutdown();
assertEventuallySizeAtLeast(listener.events, 3);
assertMembershipRemovedEvent(listener.events.get(2), member2, hz1.getCluster().getLocalMember());
}
@Test
public void testInitialMembershipListenerRegistrationWithMultipleInitialMembers() {
TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory(2);
HazelcastInstance hz1 = factory.newHazelcastInstance();
HazelcastInstance hz2 = factory.newHazelcastInstance();
InitialMembershipListenerImpl listener = new InitialMembershipListenerImpl();
hz1.getCluster().addMembershipListener(listener);
assertEventuallySizeAtLeast(listener.events, 1);
assertInitialMembershipEvent(listener.events.get(0), hz1.getCluster().getLocalMember(), hz2.getCluster().getLocalMember());
}
public void assertInitialMembershipEvent(EventObject e, Member... expectedMembers) {
assertTrue(e instanceof InitialMembershipEvent);
InitialMembershipEvent initialMembershipEvent = (InitialMembershipEvent) e;
Set<Member> foundMembers = initialMembershipEvent.getMembers();
assertEquals(new HashSet<Member>(Arrays.asList(expectedMembers)), foundMembers);
}
public void assertMembershipAddedEvent(EventObject e, Member addedMember, Member... expectedMembers) {
assertMembershipEvent(e, MembershipEvent.MEMBER_ADDED, addedMember, expectedMembers);
}
public void assertMembershipRemovedEvent(EventObject e, Member addedMember, Member... expectedMembers) {
assertMembershipEvent(e, MembershipEvent.MEMBER_REMOVED, addedMember, expectedMembers);
}
public void assertMembershipEvent(EventObject e, int type, Member changedMember, Member... expectedMembers) {
assertTrue(e instanceof MembershipEvent);
MembershipEvent membershipEvent = (MembershipEvent) e;
Set<Member> foundMembers = membershipEvent.getMembers();
assertEquals(type, membershipEvent.getEventType());
assertEquals(changedMember, membershipEvent.getMember());
assertEquals(new HashSet<Member>(Arrays.asList(expectedMembers)), foundMembers);
}
public void assertEventuallySizeAtLeast(List<?> list, int expectedSize) {
long startTimeMs = System.currentTimeMillis();
for (; ; ) {
if (list.size() >= expectedSize) {
return;
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
if (System.currentTimeMillis() - startTimeMs > TimeUnit.SECONDS.toMillis(10)) {
fail("Timeout, size of the list didn't reach size: " + expectedSize + " in time");
}
}
}
private static class MembershipListenerImpl implements MembershipListener {
private List<EventObject> events = Collections.synchronizedList(new LinkedList<EventObject>());
public void memberAdded(MembershipEvent e) {
events.add(e);
}
public void memberRemoved(MembershipEvent e) {
events.add(e);
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
}
private static class InitialMembershipListenerImpl implements InitialMembershipListener {
private List<EventObject> events = Collections.synchronizedList(new LinkedList<EventObject>());
public void init(InitialMembershipEvent e) {
events.add(e);
}
public void memberAdded(MembershipEvent e) {
events.add(e);
}
public void memberRemoved(MembershipEvent e) {
events.add(e);
}
public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
}
public void assertEventCount(int expected) {
assertEquals(expected, events.size());
}
}
}
| 0true
|
hazelcast_src_test_java_com_hazelcast_cluster_ClusterMembershipTest.java
|
2,571 |
clusterService.submitStateUpdateTask("zen-disco-join (elected_as_master)", Priority.URGENT, new ProcessedClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder()
.localNodeId(localNode.id())
.masterNodeId(localNode.id())
// put our local node
.put(localNode);
// update the fact that we are the master...
latestDiscoNodes = builder.build();
ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NO_MASTER_BLOCK).build();
return ClusterState.builder(currentState).nodes(latestDiscoNodes).blocks(clusterBlocks).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
}
});
| 1no label
|
src_main_java_org_elasticsearch_discovery_zen_ZenDiscovery.java
|
2,894 |
public static class GreaterLessPredicate extends EqualPredicate {
boolean equal;
boolean less;
public GreaterLessPredicate() {
}
public GreaterLessPredicate(String attribute, Comparable value, boolean equal, boolean less) {
super(attribute, value);
this.equal = equal;
this.less = less;
}
@Override
public boolean apply(Map.Entry mapEntry) {
final Comparable entryValue = readAttribute(mapEntry);
final Comparable attributeValue = convert(mapEntry, entryValue, value);
final int result = entryValue.compareTo(attributeValue);
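// 'equal' admits the boundary (<= / >=); 'less' picks the direction (< versus >)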
return equal && result == 0 || (less ? (result < 0) : (result > 0));
}
@Override
public Set<QueryableEntry> filter(QueryContext queryContext) {
Index index = getIndex(queryContext);
final ComparisonType comparisonType;
if (less) {
comparisonType = equal ? ComparisonType.LESSER_EQUAL : ComparisonType.LESSER;
} else {
comparisonType = equal ? ComparisonType.GREATER_EQUAL : ComparisonType.GREATER;
}
return index.getSubRecords(comparisonType, value);
}
@Override
public void readData(ObjectDataInput in) throws IOException {
super.readData(in);
equal = in.readBoolean();
less = in.readBoolean();
}
@Override
public void writeData(ObjectDataOutput out) throws IOException {
super.writeData(out);
out.writeBoolean(equal);
out.writeBoolean(less);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(attribute);
sb.append(less ? "<" : ">");
if (equal) {
sb.append("=");
}
sb.append(value);
return sb.toString();
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_query_Predicates.java
|
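A hedged sketch of how the two flags in GreaterLessPredicate above encode the four comparison operators; the "age" attribute and the value 30 are illustrative:

import com.hazelcast.query.Predicates.GreaterLessPredicate;

class ComparisonSketch {
    GreaterLessPredicate under30   = new GreaterLessPredicate("age", 30, false, true);  // age <  30
    GreaterLessPredicate atMost30  = new GreaterLessPredicate("age", 30, true, true);   // age <= 30
    GreaterLessPredicate over30    = new GreaterLessPredicate("age", 30, false, false); // age >  30
    GreaterLessPredicate atLeast30 = new GreaterLessPredicate("age", 30, true, false);  // age >= 30
}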
360 |
public static class ExceptionThrowingMapper
implements Mapper<Integer, Integer, String, Integer> {
@Override
public void map(Integer key, Integer value, Context<String, Integer> context) {
throw new NullPointerException("BUMM!");
}
}
| 0true
|
hazelcast-client_src_test_java_com_hazelcast_client_mapreduce_ClientMapReduceTest.java
|
798 |
public class PercolateShardRequest extends BroadcastShardOperationRequest {
private String documentType;
private BytesReference source;
private BytesReference docSource;
private boolean onlyCount;
public PercolateShardRequest() {
}
public PercolateShardRequest(String index, int shardId) {
super(index, shardId);
}
public PercolateShardRequest(String index, int shardId, PercolateRequest request) {
super(index, shardId, request);
this.documentType = request.documentType();
this.source = request.source();
this.docSource = request.docSource();
this.onlyCount = request.onlyCount();
}
public PercolateShardRequest(ShardId shardId, PercolateRequest request) {
super(shardId.index().name(), shardId.id());
this.documentType = request.documentType();
this.source = request.source();
this.docSource = request.docSource();
this.onlyCount = request.onlyCount();
}
public String documentType() {
return documentType;
}
public BytesReference source() {
return source;
}
public BytesReference docSource() {
return docSource;
}
public boolean onlyCount() {
return onlyCount;
}
void documentType(String documentType) {
this.documentType = documentType;
}
void source(BytesReference source) {
this.source = source;
}
void docSource(BytesReference docSource) {
this.docSource = docSource;
}
void onlyCount(boolean onlyCount) {
this.onlyCount = onlyCount;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
documentType = in.readString();
source = in.readBytesReference();
docSource = in.readBytesReference();
onlyCount = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(documentType);
out.writeBytesReference(source);
out.writeBytesReference(docSource);
out.writeBoolean(onlyCount);
}
}
| 1no label
|
src_main_java_org_elasticsearch_action_percolate_PercolateShardRequest.java
|
1,943 |
public class MapGetRequest extends KeyBasedClientRequest implements Portable, RetryableRequest, SecureRequest {
private String name;
private Data key;
private transient long startTime;
public MapGetRequest() {
}
public MapGetRequest(String name, Data key) {
this.name = name;
this.key = key;
}
protected Object getKey() {
return key;
}
@Override
protected Operation prepareOperation() {
return new GetOperation(name, key);
}
@Override
protected void beforeProcess() {
startTime = System.currentTimeMillis();
}
@Override
protected void afterResponse() {
final long latency = System.currentTimeMillis() - startTime;
final MapService mapService = getService();
MapContainer mapContainer = mapService.getMapContainer(name);
if (mapContainer.getMapConfig().isStatisticsEnabled()) {
mapService.getLocalMapStatsImpl(name).incrementGets(latency);
}
}
public String getServiceName() {
return MapService.SERVICE_NAME;
}
@Override
public int getFactoryId() {
return MapPortableHook.F_ID;
}
public int getClassId() {
return MapPortableHook.GET;
}
public void write(PortableWriter writer) throws IOException {
writer.writeUTF("n", name);
final ObjectDataOutput out = writer.getRawDataOutput();
key.writeData(out);
}
public void read(PortableReader reader) throws IOException {
name = reader.readUTF("n");
final ObjectDataInput in = reader.getRawDataInput();
key = new Data();
key.readData(in);
}
public MapPermission getRequiredPermission() {
return new MapPermission(name, ActionConstants.ACTION_READ);
}
}
| 1no label
|
hazelcast_src_main_java_com_hazelcast_map_client_MapGetRequest.java
|
1,130 |
public class FulfillmentType implements Serializable, BroadleafEnumerationType {
private static final long serialVersionUID = 1L;
private static final Map<String, FulfillmentType> TYPES = new LinkedHashMap<String, FulfillmentType>();
public static final FulfillmentType DIGITAL = new FulfillmentType("DIGITAL", "Digital");
public static final FulfillmentType PHYSICAL_SHIP = new FulfillmentType("PHYSICAL_SHIP", "Physical Ship");
public static final FulfillmentType PHYSICAL_PICKUP = new FulfillmentType("PHYSICAL_PICKUP", "Physical Pickup");
public static final FulfillmentType PHYSICAL_PICKUP_OR_SHIP = new FulfillmentType("PHYSICAL_PICKUP_OR_SHIP", "Physical Pickup or Ship");
public static final FulfillmentType GIFT_CARD = new FulfillmentType("GIFT_CARD", "Gift Card");
@Deprecated
public static final FulfillmentType SHIPPING = new FulfillmentType("SHIPPING", "Shipping");
public static FulfillmentType getInstance(final String type) {
return TYPES.get(type);
}
private String type;
private String friendlyType;
public FulfillmentType() {
//do nothing
}
public FulfillmentType(final String type, final String friendlyType) {
this.friendlyType = friendlyType;
setType(type);
}
@Override
public String getType() {
return type;
}
@Override
public String getFriendlyType() {
return friendlyType;
}
private void setType(final String type) {
this.type = type;
if (!TYPES.containsKey(type)) {
TYPES.put(type, this);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((type == null) ? 0 : type.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
FulfillmentType other = (FulfillmentType) obj;
if (type == null) {
if (other.type != null)
return false;
} else if (!type.equals(other.type))
return false;
return true;
}
}
| 1no label
|
core_broadleaf-framework_src_main_java_org_broadleafcommerce_core_order_service_type_FulfillmentType.java
|
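The class above is Broadleaf's extensible-enum pattern: every instance self-registers in the static TYPES map on construction, so string lookups resolve built-in constants and subclass-defined values alike. A brief sketch:

class FulfillmentTypeSketch {
    static FulfillmentType lookup() {
        // resolves to the PHYSICAL_SHIP constant, whose constructor registered it
        // under "PHYSICAL_SHIP" during class initialization
        return FulfillmentType.getInstance("PHYSICAL_SHIP");
    }
}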
82 |
public class OConsoleApplication {
protected enum RESULT {
OK, ERROR, EXIT
};
protected InputStream in = System.in;
protected PrintStream out = System.out;
protected PrintStream err = System.err;
protected String wordSeparator = " ";
protected String[] helpCommands = { "help", "?" };
protected String[] exitCommands = { "exit", "bye", "quit" };
protected Map<String, String> properties = new HashMap<String, String>();
// protected OConsoleReader reader = new TTYConsoleReader();
protected OConsoleReader reader = new DefaultConsoleReader();
protected boolean interactiveMode;
protected String[] args;
protected static final String[] COMMENT_PREFIXS = new String[] { "#", "--", "//" };
public void setReader(OConsoleReader iReader) {
this.reader = iReader;
reader.setConsole(this);
}
public OConsoleApplication(String[] iArgs) {
this.args = iArgs;
}
public int run() {
interactiveMode = isInteractiveMode(args);
onBefore();
int result = 0;
if (interactiveMode) {
// EXECUTE IN INTERACTIVE MODE
// final BufferedReader reader = new BufferedReader(new InputStreamReader(in));
String consoleInput;
while (true) {
out.println();
out.print("orientdb> ");
consoleInput = reader.readLine();
if (consoleInput == null || consoleInput.length() == 0)
continue;
if (!executeCommands(new ODFACommandStream(consoleInput), false))
break;
}
} else {
// EXECUTE IN BATCH MODE
result = executeBatch(getCommandLine(args)) ? 0 : 1;
}
onAfter();
return result;
}
protected boolean isInteractiveMode(String[] args) {
return args.length == 0;
}
protected boolean executeBatch(final String commandLine) {
final File commandFile = new File(commandLine);
OCommandStream scanner;
try {
scanner = new ODFACommandStream(commandFile);
} catch (FileNotFoundException e) {
scanner = new ODFACommandStream(commandLine);
}
return executeCommands(scanner, true);
}
protected boolean executeCommands(final OCommandStream commandStream, final boolean iExitOnException) {
final StringBuilder commandBuffer = new StringBuilder();
try {
while (commandStream.hasNext()) {
String commandLine = commandStream.nextCommand();
if (commandLine.isEmpty())
// EMPTY LINE
continue;
if (isComment(commandLine))
continue;
// SCRIPT CASE: MANAGE ENSEMBLING ALL TOGETHER
if (isCollectingCommands(commandLine)) {
// BEGIN: START TO COLLECT
commandBuffer.append(commandLine);
commandLine = null;
} else if (commandLine.startsWith("end") && commandBuffer.length() > 0) {
// END: FLUSH IT
commandLine = commandBuffer.toString();
commandBuffer.setLength(0);
} else if (commandBuffer.length() > 0) {
// BUFFER IT
commandBuffer.append(';');
commandBuffer.append(commandLine);
commandLine = null;
}
if (commandLine != null) {
final RESULT status = execute(commandLine);
commandLine = null;
if (status == RESULT.EXIT || status == RESULT.ERROR && iExitOnException)
return false;
}
}
if (commandBuffer.length() > 0) {
final RESULT status = execute(commandBuffer.toString());
if (status == RESULT.EXIT || status == RESULT.ERROR && iExitOnException)
return false;
}
} finally {
commandStream.close();
}
return true;
}
protected boolean isComment(final String commandLine) {
for (String comment : COMMENT_PREFIXS)
if (commandLine.startsWith(comment))
return true;
return false;
}
protected boolean isCollectingCommands(final String iLine) {
return false;
}
protected RESULT execute(String iCommand) {
iCommand = iCommand.trim();
if (iCommand.length() == 0)
// NULL LINE: JUMP IT
return RESULT.OK;
if (isComment(iCommand))
// COMMENT: JUMP IT
return RESULT.OK;
String[] commandWords = OStringParser.getWords(iCommand, wordSeparator);
for (String cmd : helpCommands)
if (cmd.equals(commandWords[0])) {
help();
return RESULT.OK;
}
for (String cmd : exitCommands)
if (cmd.equals(commandWords[0])) {
return RESULT.EXIT;
}
Method lastMethodInvoked = null;
final StringBuilder lastCommandInvoked = new StringBuilder();
final String commandLowerCase = iCommand.toLowerCase();
for (Entry<Method, Object> entry : getConsoleMethods().entrySet()) {
final Method m = entry.getKey();
final String methodName = m.getName();
final ConsoleCommand ann = m.getAnnotation(ConsoleCommand.class);
final StringBuilder commandName = new StringBuilder();
char ch;
int commandWordCount = 1;
for (int i = 0; i < methodName.length(); ++i) {
ch = methodName.charAt(i);
if (Character.isUpperCase(ch)) {
commandName.append(" ");
ch = Character.toLowerCase(ch);
commandWordCount++;
}
commandName.append(ch);
}
if (!commandLowerCase.equals(commandName.toString()) && !commandLowerCase.startsWith(commandName.toString() + " ")) {
if (ann == null)
continue;
String[] aliases = ann.aliases();
if (aliases == null || aliases.length == 0)
continue;
boolean aliasMatch = false;
for (String alias : aliases) {
if (iCommand.startsWith(alias.split(" ")[0])) {
aliasMatch = true;
commandWordCount = 1;
break;
}
}
if (!aliasMatch)
continue;
}
Object[] methodArgs;
// BUILD PARAMETERS
if (ann != null && !ann.splitInWords()) {
methodArgs = new String[] { iCommand.substring(iCommand.indexOf(' ') + 1) };
} else {
if (m.getParameterTypes().length > commandWords.length - commandWordCount) {
// METHOD PARAMS AND USED PARAMS MISMATCH: CHECK FOR OPTIONALS
for (int paramNum = m.getParameterAnnotations().length - 1; paramNum > -1; paramNum--) {
final Annotation[] paramAnn = m.getParameterAnnotations()[paramNum];
if (paramAnn != null)
for (int annNum = paramAnn.length - 1; annNum > -1; annNum--) {
if (paramAnn[annNum] instanceof ConsoleParameter) {
final ConsoleParameter annotation = (ConsoleParameter) paramAnn[annNum];
if (annotation.optional())
commandWords = OArrays.copyOf(commandWords, commandWords.length + 1);
break;
}
}
}
}
methodArgs = OArrays.copyOfRange(commandWords, commandWordCount, commandWords.length);
}
try {
m.invoke(entry.getValue(), methodArgs);
} catch (IllegalArgumentException e) {
lastMethodInvoked = m;
// GET THE COMMAND NAME
lastCommandInvoked.setLength(0);
for (int i = 0; i < commandWordCount; ++i) {
if (lastCommandInvoked.length() > 0)
lastCommandInvoked.append(" ");
lastCommandInvoked.append(commandWords[i]);
}
continue;
} catch (Exception e) {
// e.printStackTrace();
// err.println();
if (e.getCause() != null)
onException(e.getCause());
else
e.printStackTrace();
return RESULT.ERROR;
}
return RESULT.OK;
}
if (lastMethodInvoked != null)
syntaxError(lastCommandInvoked.toString(), lastMethodInvoked);
error("\n!Unrecognized command: '%s'", iCommand);
return RESULT.ERROR;
}
protected void syntaxError(String iCommand, Method m) {
error(
"\n!Wrong syntax. If you're using a file make sure all commands are delimited by semicolon (;) or a linefeed (\\n)\n\r\n\r Expected: %s ",
iCommand);
StringBuilder buffer = new StringBuilder("\n\nWhere:\n\n");
for (Annotation[] annotations : m.getParameterAnnotations()) {
// reset per parameter, so a parameter without annotations does not inherit the previous one's values
String paramName = null;
String paramDescription = null;
boolean paramOptional = false;
for (Annotation ann : annotations) {
if (ann instanceof com.orientechnologies.common.console.annotation.ConsoleParameter) {
paramName = ((com.orientechnologies.common.console.annotation.ConsoleParameter) ann).name();
paramDescription = ((com.orientechnologies.common.console.annotation.ConsoleParameter) ann).description();
paramOptional = ((com.orientechnologies.common.console.annotation.ConsoleParameter) ann).optional();
break;
}
}
if (paramName == null)
paramName = "?";
if (paramOptional)
message("[<%s>] ", paramName);
else
message("<%s> ", paramName);
buffer.append("* ");
buffer.append(String.format("%-15s", paramName));
if (paramDescription != null)
buffer.append(String.format("%-15s", paramDescription));
buffer.append("\n");
}
message(buffer.toString());
}
/**
* Returns a map of all console method and the object they can be called on.
*
* @return Map<Method,Object>
*/
protected Map<Method, Object> getConsoleMethods() {
// search for declared command collections
final Iterator<OConsoleCommandCollection> ite = ServiceRegistry.lookupProviders(OConsoleCommandCollection.class);
final Collection<Object> candidates = new ArrayList<Object>();
candidates.add(this);
while (ite.hasNext()) {
try {
// make a copy and set it's context
final OConsoleCommandCollection cc = ite.next().getClass().newInstance();
cc.setContext(this);
candidates.add(cc);
} catch (InstantiationException ex) {
Logger.getLogger(OConsoleApplication.class.getName()).log(Level.WARNING, ex.getMessage());
} catch (IllegalAccessException ex) {
Logger.getLogger(OConsoleApplication.class.getName()).log(Level.WARNING, ex.getMessage());
}
}
final Map<Method, Object> consoleMethods = new TreeMap<Method, Object>(new Comparator<Method>() {
public int compare(Method o1, Method o2) {
int res = o1.getName().compareTo(o2.getName());
if (res == 0)
res = o1.toString().compareTo(o2.toString());
return res;
}
});
for (final Object candidate : candidates) {
final Method[] methods = candidate.getClass().getMethods();
for (Method m : methods) {
if (Modifier.isAbstract(m.getModifiers()) || Modifier.isStatic(m.getModifiers()) || !Modifier.isPublic(m.getModifiers())) {
continue;
}
if (m.getReturnType() != Void.TYPE) {
continue;
}
consoleMethods.put(m, candidate);
}
}
return consoleMethods;
}
protected Map<String, Object> addCommand(Map<String, Object> commandsTree, String commandLine) {
return commandsTree;
}
protected void help() {
message("\nAVAILABLE COMMANDS:\n");
for (Method m : getConsoleMethods().keySet()) {
com.orientechnologies.common.console.annotation.ConsoleCommand annotation = m
.getAnnotation(com.orientechnologies.common.console.annotation.ConsoleCommand.class);
if (annotation == null)
continue;
message("* %-70s%s\n", getCorrectMethodName(m), annotation.description());
}
message("* %-70s%s\n", getClearName("help"), "Print this help");
message("* %-70s%s\n", getClearName("exit"), "Close the console");
}
public static String getCorrectMethodName(Method m) {
StringBuilder buffer = new StringBuilder();
buffer.append(getClearName(m.getName()));
for (int i = 0; i < m.getParameterAnnotations().length; i++) {
for (int j = 0; j < m.getParameterAnnotations()[i].length; j++) {
if (m.getParameterAnnotations()[i][j] instanceof com.orientechnologies.common.console.annotation.ConsoleParameter) {
buffer
.append(" <"
+ ((com.orientechnologies.common.console.annotation.ConsoleParameter) m.getParameterAnnotations()[i][j]).name()
+ ">");
}
}
}
return buffer.toString();
}
public static String getClearName(String iJavaName) {
StringBuilder buffer = new StringBuilder();
char c;
if (iJavaName != null) {
buffer.append(iJavaName.charAt(0));
for (int i = 1; i < iJavaName.length(); ++i) {
c = iJavaName.charAt(i);
if (Character.isUpperCase(c)) {
buffer.append(' ');
}
buffer.append(Character.toLowerCase(c));
}
}
return buffer.toString();
}
protected String getCommandLine(String[] iArguments) {
StringBuilder command = new StringBuilder();
for (int i = 0; i < iArguments.length; ++i) {
if (i > 0)
command.append(" ");
command.append(iArguments[i]);
}
return command.toString();
}
protected void onBefore() {
}
protected void onAfter() {
}
protected void onException(Throwable throwable) {
throwable.printStackTrace();
}
public void message(final String iMessage, final Object... iArgs) {
final int verboseLevel = getVerboseLevel();
if (verboseLevel > 1)
out.printf(iMessage, iArgs);
}
public void error(final String iMessage, final Object... iArgs) {
final int verboseLevel = getVerboseLevel();
if (verboseLevel > 0)
out.printf(iMessage, iArgs);
}
public int getVerboseLevel() {
final String v = properties.get("verbose");
final int verboseLevel = v != null ? Integer.parseInt(v) : 2;
return verboseLevel;
}
}
| 0true
|
commons_src_main_java_com_orientechnologies_common_console_OConsoleApplication.java
|
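As execute() above shows, console command names are derived by splitting method names at capital letters; a hedged sketch of a hypothetical subclass command (class name, command, and description are illustrative):

import com.orientechnologies.common.console.annotation.ConsoleCommand;

public class MyConsole extends OConsoleApplication {
    public MyConsole(String[] args) {
        super(args);
    }

    // matched by the console input "list databases": each capital opens a new word
    @ConsoleCommand(description = "List the configured databases")
    public void listDatabases() {
        message("\nno databases configured");
    }
}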
137 |
public final class ClientTypes {
public static final String JAVA = "JVM";
public static final String CSHARP = "CSP";
public static final String CPP = "CPP";
public static final String PYTHON = "PHY";
public static final String RUBY = "RBY";
private ClientTypes() {
}
}
| 0true
|
hazelcast_src_main_java_com_hazelcast_client_ClientTypes.java
|
400 |
public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder<CreateSnapshotRequest, CreateSnapshotResponse, CreateSnapshotRequestBuilder> {
/**
* Constructs a new create snapshot request builder
*
* @param clusterAdminClient cluster admin client
*/
public CreateSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient) {
super((InternalClusterAdminClient) clusterAdminClient, new CreateSnapshotRequest());
}
/**
* Constructs a new create snapshot request builder with specified repository and snapshot names
*
* @param clusterAdminClient cluster admin client
* @param repository repository name
* @param snapshot snapshot name
*/
public CreateSnapshotRequestBuilder(ClusterAdminClient clusterAdminClient, String repository, String snapshot) {
super((InternalClusterAdminClient) clusterAdminClient, new CreateSnapshotRequest(repository, snapshot));
}
/**
* Sets the snapshot name
*
* @param snapshot snapshot name
* @return this builder
*/
public CreateSnapshotRequestBuilder setSnapshot(String snapshot) {
request.snapshot(snapshot);
return this;
}
/**
* Sets the repository name
*
* @param repository repository name
* @return this builder
*/
public CreateSnapshotRequestBuilder setRepository(String repository) {
request.repository(repository);
return this;
}
/**
* Sets a list of indices that should be included into the snapshot
* <p/>
* The list of indices supports multi-index syntax. For example: "+test*", "-test42" will snapshot all indices with
* prefix "test" except index "test42". Aliases are supported. An empty list or {"_all"} will snapshot all open
* indices in the cluster.
*
* @param indices list of indices to include into the snapshot
* @return this builder
*/
public CreateSnapshotRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* Specifies the indices options. Like what type of requested indices to ignore. For example indices that don't exist.
*
* @param indicesOptions the desired behaviour regarding indices options
* @return this request
*/
public CreateSnapshotRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request.indicesOptions(indicesOptions);
return this;
}
/**
* If set to true the request should wait for the snapshot completion before returning.
*
* @param waitForCompletion true if the request should wait for the snapshot completion before returning
* @return this builder
*/
public CreateSnapshotRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
request.waitForCompletion(waitForCompletion);
return this;
}
/**
* If set to true the request should snapshot indices with unavailable shards
*
* @param partial true if request should snapshot indices with unavailable shards
* @return this builder
*/
public CreateSnapshotRequestBuilder setPartial(boolean partial) {
request.partial(partial);
return this;
}
/**
* Sets repository-specific snapshot settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public CreateSnapshotRequestBuilder setSettings(Settings settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific snapshot settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public CreateSnapshotRequestBuilder setSettings(Settings.Builder settings) {
request.settings(settings);
return this;
}
/**
* Sets repository-specific snapshot settings in YAML, JSON or properties format
* <p/>
* See repository documentation for more information.
*
* @param source repository-specific snapshot settings
* @return this builder
*/
public CreateSnapshotRequestBuilder setSettings(String source) {
request.settings(source);
return this;
}
/**
* Sets repository-specific snapshot settings.
* <p/>
* See repository documentation for more information.
*
* @param settings repository-specific snapshot settings
* @return this builder
*/
public CreateSnapshotRequestBuilder setSettings(Map<String, Object> settings) {
request.settings(settings);
return this;
}
/**
* Set to true if snapshot should include global cluster state
*
* @param includeGlobalState true if snapshot should include global cluster state
* @return this builder
*/
public CreateSnapshotRequestBuilder setIncludeGlobalState(boolean includeGlobalState) {
request.includeGlobalState(includeGlobalState);
return this;
}
@Override
protected void doExecute(ActionListener<CreateSnapshotResponse> listener) {
((ClusterAdminClient) client).createSnapshot(request, listener);
}
}
| 0true
|
src_main_java_org_elasticsearch_action_admin_cluster_snapshots_create_CreateSnapshotRequestBuilder.java
|
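A hedged usage sketch of the builder above; the repository and snapshot names and the index pattern are illustrative, and the execute().actionGet() plumbing is assumed from the request-builder base class:

static CreateSnapshotResponse snapshotSketch(ClusterAdminClient client) {
    return new CreateSnapshotRequestBuilder(client, "my_backup", "snapshot_1")
            .setIndices("test*", "-test42") // multi-index syntax, see setIndices above
            .setWaitForCompletion(true)     // block until the snapshot finishes
            .setIncludeGlobalState(false)
            .execute().actionGet();
}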
109 |
public class OIOUtilsTest {
@Test
public void shouldGetTimeAsMillis() {
assertGetTimeAsMillis("2h", 2 * 3600 * 1000);
assertGetTimeAsMillis("500ms", 500);
assertGetTimeAsMillis("4d", 4 * 24 * 3600 * 1000);
assertGetTimeAsMillis("6w", 6L * 7 * 24 * 3600 * 1000);
}
private void assertGetTimeAsMillis(String data, long expected) {
assertEquals(OIOUtils.getTimeAsMillisecs(data), expected);
}
}
| 0true
|
commons_src_test_java_com_orientechnologies_common_io_OIOUtilsTest.java
|