proj_name
stringclasses 131
values | relative_path
stringlengths 30
228
| class_name
stringlengths 1
68
| func_name
stringlengths 1
48
| masked_class
stringlengths 78
9.82k
| func_body
stringlengths 46
9.61k
| len_input
int64 29
2.01k
| len_output
int64 14
1.94k
| total
int64 55
2.05k
| relevant_context
stringlengths 0
38.4k
|
---|---|---|---|---|---|---|---|---|---|
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/SerializableReader.java | SerializableReader | read | class SerializableReader<T extends Serializable> implements SizedReader<T>, BytesReader<T> {
@NotNull
@Override
public T read(@NotNull Bytes in, long size, @Nullable T using) {
return read(in, using);
}
@NotNull
@Override
public T read(Bytes in, @Nullable T using) {<FILL_FUNCTION_BODY>}
@Override
public void readMarshallable(@NotNull WireIn wireIn) {
// no fields to read
}
@Override
public void writeMarshallable(@NotNull WireOut wireOut) {
// no fields to write
}
} |
try {
return (T) new ObjectInputStream(in.inputStream()).readObject();
} catch (IOException | ClassNotFoundException e) {
throw new RuntimeException(e);
}
| 169 | 50 | 219 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/SizedMarshallableDataAccess.java | SizedMarshallableDataAccess | getUsing | class SizedMarshallableDataAccess<T> extends InstanceCreatingMarshaller<T>
implements DataAccess<T>, Data<T> {
// Config fields
private SizedReader<T> sizedReader;
private SizedWriter<? super T> sizedWriter;
// Cache fields
private transient boolean bytesInit;
private transient Bytes bytes;
private transient long size;
private transient VanillaBytes targetBytes;
/**
* State field
*/
private transient T instance;
public SizedMarshallableDataAccess(
Class<T> tClass, SizedReader<T> sizedReader, SizedWriter<? super T> sizedWriter) {
this(tClass, sizedReader, sizedWriter, DEFAULT_BYTES_CAPACITY);
}
protected SizedMarshallableDataAccess(
Type tClass, SizedReader<T> sizedReader, SizedWriter<? super T> sizedWriter,
long bytesCapacity) {
super(tClass);
this.sizedWriter = sizedWriter;
this.sizedReader = sizedReader;
initTransients(bytesCapacity);
}
SizedReader<T> sizedReader() {
return sizedReader;
}
SizedWriter<? super T> sizedWriter() {
return sizedWriter;
}
private void initTransients(long bytesCapacity) {
bytes = DefaultElasticBytes.allocateDefaultElasticBytes(bytesCapacity);
targetBytes = VanillaBytes.vanillaBytes();
}
@Override
public RandomDataInput bytes() {
if (!bytesInit) {
bytes.clear();
sizedWriter.write(bytes, size, instance);
bytesInit = true;
}
return bytes.bytesStore();
}
@Override
public long offset() {
return 0;
}
@Override
public long size() {
return size;
}
@Override
public void writeTo(RandomDataOutput target, long targetOffset) {
if (bytesInit || !(target instanceof BytesStore)) {
target.write(targetOffset, bytes(), offset(), size);
} else {
targetBytes.bytesStore((BytesStore) target, targetOffset, size);
targetBytes.writePosition(targetOffset);
sizedWriter.write(targetBytes, size, instance);
targetBytes.bytesStore(BytesStore.empty(), 0, 0);
}
}
@Override
public T get() {
return instance;
}
@Override
public T getUsing(@Nullable T using) {<FILL_FUNCTION_BODY>}
@Override
public int hashCode() {
return dataHashCode();
}
@Override
public boolean equals(Object obj) {
return dataEquals(obj);
}
@Override
public String toString() {
return get().toString();
}
@Override
public Data<T> getData(@NotNull T instance) {
this.instance = instance;
this.size = sizedWriter.size(instance);
bytesInit = false;
return this;
}
@Override
public void uninit() {
instance = null;
}
@Override
public DataAccess<T> copy() {
return new SizedMarshallableDataAccess<>(
tType(), copyIfNeeded(sizedReader), copyIfNeeded(sizedWriter),
bytes.realCapacity());
}
@Override
public void readMarshallable(@NotNull WireIn wireIn) {
super.readMarshallable(wireIn);
sizedReader = wireIn.read(() -> "sizedReader").object(SizedReader.class);
sizedWriter = wireIn.read(() -> "sizedWriter").object(SizedWriter.class);
initTransients(DEFAULT_BYTES_CAPACITY);
}
@Override
public void writeMarshallable(@NotNull WireOut wireOut) {
super.writeMarshallable(wireOut);
wireOut.write(() -> "sizedReader").typedMarshallable(sizedReader);
wireOut.write(() -> "sizedWriter").typedMarshallable(sizedWriter);
}
} |
if (using == null)
using = createInstance();
T result = sizedReader.read(bytes, size(), using);
bytes.readPosition(0);
return result;
| 1,049 | 48 | 1,097 | <methods>public void readMarshallable(net.openhft.chronicle.wire.WireIn) ,public void writeMarshallable(net.openhft.chronicle.wire.WireOut) <variables>private java.lang.reflect.Type tClass |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/StopBitSizeMarshaller.java | StopBitSizeMarshaller | minStoringLengthOfSizesInRange | class StopBitSizeMarshaller
implements SizeMarshaller, EnumMarshallable<StopBitSizeMarshaller> {
public static final StopBitSizeMarshaller INSTANCE = new StopBitSizeMarshaller();
private static final long MIN_ENCODABLE_SIZE = Long.MIN_VALUE;
private static final long MAX_ENCODABLE_SIZE = Long.MAX_VALUE;
private StopBitSizeMarshaller() {
}
private static void rangeChecks(long minSize, long maxSize) {
if (minSize > maxSize)
throw new IllegalArgumentException("minSize = " + minSize + ", maxSize = " + maxSize);
}
@Override
public int storingLength(long size) {
return BytesUtil.stopBitLength(size);
}
@Override
public long minStorableSize() {
return MIN_ENCODABLE_SIZE;
}
@Override
public long maxStorableSize() {
return MAX_ENCODABLE_SIZE;
}
@Override
public int minStoringLengthOfSizesInRange(long minSize, long maxSize) {<FILL_FUNCTION_BODY>}
@Override
public int maxStoringLengthOfSizesInRange(long minSize, long maxSize) {
rangeChecks(minSize, maxSize);
return max(storingLength(minSize), storingLength(maxSize));
}
@Override
public void writeSize(Bytes out, long sizeToWrite) {
BytesUtil.writeStopBit(out, sizeToWrite);
}
@Override
public long readSize(Bytes in) {
return BytesUtil.readStopBit(in);
}
@NotNull
@Override
public StopBitSizeMarshaller readResolve() {
return INSTANCE;
}
} |
rangeChecks(minSize, maxSize);
// different signs
if (minSize * maxSize < 0) {
// the range includes 0 which encoding length is 1
return 1;
}
return min(storingLength(minSize), storingLength(maxSize));
| 466 | 74 | 540 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/StringBuilderSizedReader.java | StringBuilderSizedReader | read | class StringBuilderSizedReader
implements SizedReader<StringBuilder>, EnumMarshallable<StringBuilderSizedReader> {
public static final StringBuilderSizedReader INSTANCE = new StringBuilderSizedReader();
private StringBuilderSizedReader() {
}
@NotNull
@Override
public StringBuilder read(Bytes in, long size, @Nullable StringBuilder using) {<FILL_FUNCTION_BODY>}
@NotNull
@Override
public StringBuilderSizedReader readResolve() {
return INSTANCE;
}
} |
if (0 > size || size > Integer.MAX_VALUE)
throw new IllegalStateException("positive int size expected, " + size + " given");
int csLen = (int) size;
if (using == null) {
using = new StringBuilder(csLen);
} else {
using.setLength(0);
using.ensureCapacity(csLen);
}
BytesUtil.parseUtf8(in, using, csLen);
return using;
| 141 | 123 | 264 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/StringBuilderUtf8DataAccess.java | StringBuilderUtf8DataAccess | getUsing | class StringBuilderUtf8DataAccess
extends AbstractCharSequenceUtf8DataAccess<StringBuilder> {
public StringBuilderUtf8DataAccess() {
this(DEFAULT_BYTES_CAPACITY);
}
private StringBuilderUtf8DataAccess(long bytesCapacity) {
super(bytesCapacity);
}
@Override
public StringBuilder getUsing(@Nullable StringBuilder using) {<FILL_FUNCTION_BODY>}
@Override
public DataAccess<StringBuilder> copy() {
return new StringBuilderUtf8DataAccess(bytes().realCapacity());
}
} |
if (using != null) {
using.setLength(0);
} else {
using = new StringBuilder(cs.length());
}
using.append(cs);
return using;
| 155 | 55 | 210 | <methods>public net.openhft.chronicle.bytes.RandomDataInput bytes() ,public java.lang.StringBuilder get() ,public Data<java.lang.StringBuilder> getData(java.lang.StringBuilder) ,public long offset() ,public void readMarshallable(net.openhft.chronicle.wire.WireIn) ,public long size() ,public void uninit() ,public void writeMarshallable(net.openhft.chronicle.wire.WireOut) <variables>private transient Bytes#RAW bytes,transient java.lang.StringBuilder cs |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/StringBytesReader.java | StringBytesReader | read | class StringBytesReader implements BytesReader<String>, StatefulCopyable<StringBytesReader> {
/**
* Cache field
*/
private transient StringBuilder sb;
public StringBytesReader() {
initTransients();
}
private void initTransients() {
sb = new StringBuilder();
}
@NotNull
@Override
public String read(Bytes in, @Nullable String using) {<FILL_FUNCTION_BODY>}
@Override
public StringBytesReader copy() {
return new StringBytesReader();
}
@Override
public void readMarshallable(@NotNull WireIn wireIn) {
// no fields to read
initTransients();
}
@Override
public void writeMarshallable(@NotNull WireOut wireOut) {
// no fields to write
}
} |
if (in.readUtf8(sb)) {
return sb.toString();
} else {
throw new NullPointerException("BytesReader couldn't read null");
}
| 215 | 48 | 263 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/StringSizedReader.java | StringSizedReader | read | class StringSizedReader implements SizedReader<String>, StatefulCopyable<StringSizedReader> {
/**
* Cache field
*/
private transient StringBuilder sb;
public StringSizedReader() {
initTransients();
}
private void initTransients() {
sb = new StringBuilder();
}
@NotNull
@Override
public String read(@NotNull Bytes in, long size, @Nullable String using) {<FILL_FUNCTION_BODY>}
@Override
public StringSizedReader copy() {
return new StringSizedReader();
}
@Override
public void readMarshallable(@NotNull WireIn wireIn) {
// no fields to read
initTransients();
}
@Override
public void writeMarshallable(@NotNull WireOut wireOut) {
// no fields to write
}
} |
if (0 > size || size > Integer.MAX_VALUE)
throw new IllegalStateException("positive int size expected, " + size + " given");
sb.setLength(0);
BytesUtil.parseUtf8(in, sb, (int) size);
return sb.toString();
| 225 | 75 | 300 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/TypedMarshallableReaderWriter.java | TypedMarshallableReaderWriter | read | class TypedMarshallableReaderWriter<V extends Marshallable>
extends CachingCreatingMarshaller<V> {
public TypedMarshallableReaderWriter(Class<V> vClass) {
super(vClass);
}
@NotNull
@Override
public V read(Bytes in, long size, @Nullable V using) {<FILL_FUNCTION_BODY>}
protected void writeToWire(Wire wire, @NotNull V toWrite) {
wire.getValueOut().object(toWrite);
}
} |
BinaryWire wire = Wires.binaryWireForRead(in, in.readPosition(), size);
return (V) wire.getValueIn().object(using, tClass());
| 136 | 47 | 183 | <methods>public void <init>(Class<V>) ,public long size(V) ,public void write(Bytes#RAW, long, V) <variables>static final ThreadLocal<java.lang.Object> LAST_TL,static final ThreadLocal<net.openhft.chronicle.wire.Wire> WIRE_TL |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/ValueDataAccess.java | ValueDataAccess | allocateBytesStoreForInstance | class ValueDataAccess<T> extends AbstractData<T> implements DataAccess<T> {
/**
* Config field
*/
private Class<T> valueType;
// Cache fields
private transient Class<? extends T> nativeClass;
private transient Class<? extends T> heapClass;
private transient Byteable nativeInstance;
private transient Copyable nativeInstanceAsCopyable;
/**
* State field
*/
private transient Byteable instance;
public ValueDataAccess(Class<T> valueType) {
this.valueType = valueType;
initTransients();
}
/**
* Returns the interface of values serialized.
*/
protected Class<T> valueType() {
return valueType;
}
protected Class<? extends T> nativeClass() {
return nativeClass;
}
protected Class<? extends T> heapClass() {
return heapClass;
}
private void initTransients() {
nativeInstance = (Byteable) Values.newNativeReference(valueType);
nativeInstanceAsCopyable = (Copyable) nativeInstance;
nativeClass = (Class<? extends T>) nativeInstance.getClass();
heapClass = Values.heapClassFor(valueType);
nativeInstance.bytesStore(allocateBytesStoreForInstance(), 0, nativeInstance.maxSize());
}
private BytesStore allocateBytesStoreForInstance() {<FILL_FUNCTION_BODY>}
protected T createInstance() {
try {
return heapClass.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@Override
public RandomDataInput bytes() {
return instance.bytesStore();
}
@Override
public long offset() {
return instance.offset();
}
@Override
public long size() {
return instance.maxSize();
}
@Override
public T get() {
return (T) instance;
}
@Override
public T getUsing(@Nullable T using) {
if (using == null)
using = createInstance();
((Copyable) using).copyFrom(instance);
return using;
}
@Override
public Data<T> getData(@NotNull T instance) {
if (instance.getClass() == nativeClass) {
this.instance = (Byteable) instance;
} else {
nativeInstanceAsCopyable.copyFrom(instance);
this.instance = nativeInstance;
}
return this;
}
@Override
public void uninit() {
instance = null;
}
@Override
public DataAccess<T> copy() {
return new ValueDataAccess<>(valueType);
}
@Override
public void readMarshallable(@NotNull WireIn wireIn) {
valueType = wireIn.read("valueType").typeLiteral();
initTransients();
}
@Override
public void writeMarshallable(@NotNull WireOut wireOut) {
wireOut.write("valueType").typeLiteral(valueType);
}
} |
long instanceSize = nativeInstance.maxSize();
if (instanceSize > 0x7FFFFFF0) {
return BytesStore.nativeStoreWithFixedCapacity(instanceSize);
} else {
return BytesStore.wrap(ByteBuffer.allocate(Maths.toUInt31(instanceSize)));
}
| 794 | 82 | 876 | <methods>public boolean equals(java.lang.Object) ,public int hashCode() ,public java.lang.String toString() <variables> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/hash/serialization/impl/ValueReader.java | ValueReader | initTransients | class ValueReader<T>
implements SizedReader<T>, BytesReader<T>, StatefulCopyable<ValueReader<T>> {
/**
* Config field
*/
private Class<T> valueType;
// Cache fields
private transient Class<? extends T> nativeClass;
private transient Class<? extends T> heapClass;
private transient Byteable nativeReference;
public ValueReader(Class<T> valueType) {
this.valueType = valueType;
initTransients();
}
/**
* Returns the interface of values deserialized.
*/
protected Class<T> valueType() {
return valueType;
}
protected Class<? extends T> nativeClass() {
return nativeClass;
}
protected Class<? extends T> heapClass() {
return heapClass;
}
private void initTransients() {<FILL_FUNCTION_BODY>}
protected T createInstance() {
try {
return heapClass.newInstance();
} catch (InstantiationException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}
@NotNull
@Override
public T read(@NotNull Bytes in, long size, @Nullable T using) {
if (size != nativeReference.maxSize())
throw new IllegalArgumentException();
return read(in, using);
}
@NotNull
@Override
public T read(Bytes in, @Nullable T using) {
if (using != null && using.getClass() == nativeClass) {
((Byteable) using).bytesStore(in.bytesStore(), in.readPosition(),
nativeReference.maxSize());
return using;
}
if (using == null)
using = createInstance();
nativeReference.bytesStore(in.bytesStore(), in.readPosition(),
nativeReference.maxSize());
((Copyable) using).copyFrom(nativeReference);
return using;
}
@Override
public ValueReader<T> copy() {
return new ValueReader<>(valueType);
}
@Override
public void readMarshallable(@NotNull WireIn wireIn) {
valueType = wireIn.read(() -> "valueType").typeLiteral();
initTransients();
}
@Override
public void writeMarshallable(@NotNull WireOut wireOut) {
wireOut.write(() -> "valueType").typeLiteral(valueType);
}
} |
nativeClass = Values.nativeClassFor(valueType);
heapClass = Values.heapClassFor(valueType);
nativeReference = (Byteable) Values.newNativeReference(valueType);
| 622 | 53 | 675 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/ChronicleHashCorruptionImpl.java | ChronicleHashCorruptionImpl | set | class ChronicleHashCorruptionImpl implements ChronicleHashCorruption {
private int segmentIndex;
private Supplier<String> messageSupplier;
private Throwable exception;
private String message;
public static void report(
ChronicleHashCorruption.Listener corruptionListener,
ChronicleHashCorruptionImpl corruption, int segmentIndex,
Supplier<String> messageSupplier) {
corruption.set(segmentIndex, messageSupplier, null);
corruptionListener.onCorruption(corruption);
}
public static void reportException(
ChronicleHashCorruption.Listener corruptionListener,
ChronicleHashCorruptionImpl corruption, int segmentIndex,
Supplier<String> messageSupplier, Throwable exception) {
corruption.set(segmentIndex, messageSupplier, exception);
corruptionListener.onCorruption(corruption);
}
public static String format(String message, Object... args) {
return MessageFormatter.arrayFormat(message, args).getMessage();
}
private void set(int segmentIndex, Supplier<String> messageSupplier, Throwable exception) {<FILL_FUNCTION_BODY>}
@Override
public String message() {
if (message == null) {
message = messageSupplier.get();
}
return message;
}
@Nullable
@Override
public Throwable exception() {
return exception;
}
@Override
public int segmentIndex() {
return segmentIndex;
}
} |
this.segmentIndex = segmentIndex;
this.messageSupplier = messageSupplier;
this.exception = exception;
this.message = null;
| 384 | 42 | 426 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/ChronicleMapEntrySet.java | ChronicleMapEntrySet | contains | class ChronicleMapEntrySet<K, V> extends AbstractSet<Map.Entry<K, V>> {
private final AbstractChronicleMap<K, V> map;
public ChronicleMapEntrySet(AbstractChronicleMap<K, V> map) {
this.map = map;
}
@NotNull
public Iterator<Map.Entry<K, V>> iterator() {
return new ChronicleMapIterator.OfEntries<>(map);
}
public final boolean contains(Object o) {<FILL_FUNCTION_BODY>}
public final boolean remove(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
try {
Object key = e.getKey();
Object value = e.getValue();
return map.remove(key, value);
} catch (ClassCastException | NullPointerException ex) {
return false;
}
}
public final int size() {
return map.size();
}
public final boolean isEmpty() {
return map.isEmpty();
}
public final void clear() {
map.clear();
}
} |
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?, ?> e = (Map.Entry<?, ?>) o;
try {
V v = map.get(e.getKey());
return v != null && v.equals(e.getValue());
} catch (ClassCastException | NullPointerException ex) {
return false;
}
| 311 | 98 | 409 | <methods>public boolean equals(java.lang.Object) ,public int hashCode() ,public boolean removeAll(Collection<?>) <variables> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/ChronicleMapIterator.java | ChronicleMapIterator | remove | class ChronicleMapIterator<K, V, E> implements Iterator<E>, Consumer<MapEntry<K, V>> {
final AbstractChronicleMap<K, V> map;
private final Thread ownerThread = Thread.currentThread();
private final Queue<E> entryBuffer = new ArrayDeque<>();
E returned;
private int segmentIndex;
ChronicleMapIterator(AbstractChronicleMap<K, V> map) {
this.map = map;
segmentIndex = map.segments() - 1;
}
private void checkSingleThreaded() {
if (ownerThread != Thread.currentThread()) {
throw new IllegalStateException(map.toIdentityString() +
": Iterator should be accessed only from a single thread");
}
}
private void fillEntryBuffer() {
if (!entryBuffer.isEmpty())
return;
while (true) {
if (segmentIndex < 0)
return;
try (MapSegmentContext<K, V, ?> c = map.segmentContext(segmentIndex)) {
segmentIndex--;
if (c.size() == 0)
continue;
c.forEachSegmentEntry(this);
return;
}
}
}
@Override
public void accept(MapEntry<K, V> e) {
entryBuffer.add(read(e));
}
abstract E read(MapEntry<K, V> entry);
@Override
public boolean hasNext() {
checkSingleThreaded();
fillEntryBuffer();
return !entryBuffer.isEmpty();
}
@Override
public E next() {
checkSingleThreaded();
fillEntryBuffer();
E e;
if ((e = entryBuffer.poll()) == null)
throw new NoSuchElementException(map.toIdentityString());
return returned = e;
}
@Override
public void remove() {<FILL_FUNCTION_BODY>}
abstract void removeReturned();
static class OfEntries<K, V> extends ChronicleMapIterator<K, V, Entry<K, V>> {
OfEntries(AbstractChronicleMap<K, V> map) {
super(map);
}
@Override
Entry<K, V> read(MapEntry<K, V> entry) {
K key = entry.key().getUsing(null);
V value = entry.value().getUsing(null);
return new WriteThroughEntry<>(map, key, value);
}
@Override
void removeReturned() {
map.remove(returned.getKey(), returned.getValue());
}
}
static class OfKeys<K, V> extends ChronicleMapIterator<K, V, K> {
OfKeys(AbstractChronicleMap<K, V> map) {
super(map);
}
@Override
K read(MapEntry<K, V> entry) {
return entry.key().getUsing(null);
}
@Override
void removeReturned() {
map.remove(returned);
}
}
} |
checkSingleThreaded();
if (returned == null)
throw new IllegalStateException(map.toIdentityString());
removeReturned();
returned = null;
| 789 | 45 | 834 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/JsonSerializer.java | JsonSerializer | xStream | class JsonSerializer {
private JsonSerializer() {
}
static final String LOG_ERROR_SUGGEST_X_STREAM =
"map.getAll(<file>) and map.putAll(<file>) methods require the JSON XStream serializer, " +
"we don't include these artifacts by default as some users don't require this functionality. " +
"Please add the following artifacts to your project\n" +
"<dependency>\n" +
" <groupId>xstream</groupId>\n" +
" <artifactId>xstream</artifactId>\n" +
" <version>1.2.2</version>\n" +
"</dependency>\n" +
"<dependency>\n" +
" <groupId>org.codehaus.jettison</groupId>\n" +
" <artifactId>jettison</artifactId>\n" +
" <version>1.3.6</version>\n" +
"</dependency>\n";
static synchronized <K, V> void getAll(final File toFile,
final Map<K, V> map,
final List<?> jsonConverters) throws IOException {
final XStream xstream = xStream(map, jsonConverters);
try (OutputStream outputStream = createOutputStream(toFile)) {
xstream.toXML(map, outputStream);
}
}
static synchronized <K, V> void putAll(final File fromFile,
final Map<K, V> map,
final List<?> jsonConverters) throws IOException {
final XStream xstream = xStream(map, jsonConverters);
try (InputStream inputStream = createInputStream(fromFile)) {
xstream.fromXML(inputStream);
}
}
private static InputStream createInputStream(final File toFile) throws IOException {
if (toFile.getName().toLowerCase().endsWith(".gz"))
return new GZIPInputStream(new FileInputStream(toFile));
else
return new FileInputStream(toFile);
}
private static OutputStream createOutputStream(final File toFile) throws IOException {
if (toFile.getName().toLowerCase().endsWith(".gz"))
return new GZIPOutputStream(new FileOutputStream(toFile));
else
return new FileOutputStream(toFile);
}
private static <K, V> XStream xStream(final Map<K, V> map, final List<?> jsonConverters) {<FILL_FUNCTION_BODY>}
private static <K, V> void registerChronicleMapConverter(final Map<K, V> map, final XStream xstream) {
xstream.registerConverter(new VanillaChronicleMapConverter<>(map));
}
} |
try {
final XStream xstream = new XStream(new JettisonMappedXmlDriver());
xstream.setMode(XStream.NO_REFERENCES);
xstream.alias("cmap", map.getClass());
registerChronicleMapConverter(map, xstream);
xstream.registerConverter(new ByteBufferConverter());
xstream.registerConverter(new ValueConverter());
xstream.registerConverter(new StringBuilderConverter());
xstream.registerConverter(new CharSequenceConverter());
for (Object c : jsonConverters) {
if (c instanceof Converter) {
xstream.registerConverter((Converter) c);
} else {
Jvm.warn().on(JsonSerializer.class,
"Skipping Converter of type class=" + c.getClass().getName() + " as " +
" expecting an object of type com.thoughtworks.xstream.converters" +
".Converter");
}
}
return xstream;
} catch (NoClassDefFoundError e) {
throw new RuntimeException(LOG_ERROR_SUGGEST_X_STREAM, e);
}
| 702 | 286 | 988 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/MapDiagnostics.java | MapDiagnostics | printMapStats | class MapDiagnostics {
private MapDiagnostics() {
}
public static void main(String[] args) throws IOException {
String mapFile = args[0];
try (ChronicleMap map = ChronicleMap.of(Object.class, Object.class)
.createPersistedTo(new File(mapFile))) {
printMapStats(map);
}
}
public static <K, V> void printMapStats(ChronicleMap<K, V> map) {<FILL_FUNCTION_BODY>}
} |
for (int i = 0; i < map.segments(); i++) {
try (MapSegmentContext<K, V, ?> c = map.segmentContext(i)) {
System.out.printf("segment %d contains %d entries\n", i, c.size());
c.forEachSegmentEntry(e -> System.out.printf("%s, %d bytes -> %s, %d bytes\n",
e.key(), e.key().size(), e.value(), e.value().size()));
}
}
| 137 | 134 | 271 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/MapMethodsSupport.java | MapMethodsSupport | returnCurrentValueIfPresent | class MapMethodsSupport {
private MapMethodsSupport() {
}
static <V> void returnCurrentValueIfPresent(
MapQueryContext<?, V, ?> q, ReturnValue<V> returnValue) {<FILL_FUNCTION_BODY>}
static <V> boolean tryReturnCurrentValueIfPresent(
MapQueryContext<?, V, ?> q, ReturnValue<V> returnValue) {
if (q.readLock().tryLock()) {
MapEntry<?, V> entry = q.entry();
if (entry != null) {
returnValue.returnValue(entry.value());
return true;
}
// Key is absent
q.readLock().unlock();
}
q.updateLock().lock();
MapEntry<?, V> entry = q.entry();
if (entry != null) {
returnValue.returnValue(entry.value());
return true;
}
return false;
}
} |
MapEntry<?, V> entry = q.entry();
if (entry != null)
returnValue.returnValue(entry.value());
| 241 | 38 | 279 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/OldDeletedEntriesCleanupThread.java | OldDeletedEntriesCleanupThread | cleanupSegment | class OldDeletedEntriesCleanupThread extends Thread
implements MapClosable, Predicate<ReplicableEntry> {
/**
* Don't store a strong ref to a map in order to avoid it's leaking, if the user forgets to close() map, from where this thread is shut down
* explicitly. Dereference map within a single method, {@link #cleanupSegment()}. The map has a chance to be collected by GC when this thread is
* sleeping after cleaning up a segment.
*/
private final WeakReference<ReplicatedChronicleMap<?, ?, ?>> mapRef;
/**
* {@code cleanupTimeout}, {@link #cleanupTimeoutUnit} and {@link #segments} are parts of the
* cleaned Map's state, extracted in order to minimize accesses to the map.
*
* @see ChronicleHashBuilderPrivateAPI#removedEntryCleanupTimeout(long, TimeUnit)
*/
private final long cleanupTimeout;
private final TimeUnit cleanupTimeoutUnit;
private final int segments;
/**
* {@code segmentsPermutation} and {@link #inverseSegmentsPermutation} determine random order,
* in which segments are cleaned up.
*/
private final int[] segmentsPermutation;
private final int[] inverseSegmentsPermutation;
/**
* This object is used to determine that this thread is parked from {@link #sleepMillis(long)}
* or {@link #sleepNanos(long)}, not somewhere inside ChronicleMap logic, to interrupt()
* selectively in {@link #close()}.
*/
private final Object cleanupSleepingHandle = new Object();
private volatile boolean shutdown;
private long prevSegment0ScanStart = -1;
private long removedCompletely;
private long startTime = System.currentTimeMillis();
OldDeletedEntriesCleanupThread(ReplicatedChronicleMap<?, ?, ?> map) {
super("Cleanup Thread for " + map.toIdentityString());
setDaemon(true);
this.mapRef = new WeakReference<>(map);
cleanupTimeout = map.cleanupTimeout;
cleanupTimeoutUnit = map.cleanupTimeoutUnit;
segments = map.segments();
segmentsPermutation = randomPermutation(map.segments());
inverseSegmentsPermutation = inversePermutation(segmentsPermutation);
}
private static int[] randomPermutation(int n) {
int[] a = new int[n];
for (int i = 0; i < n; i++) {
a[i] = i;
}
shuffle(a);
return a;
}
// Implementing Fisher–Yates shuffle
private static void shuffle(int[] a) {
SecureRandom rnd = new SecureRandom();
for (int i = a.length - 1; i > 0; i--) {
int index = rnd.nextInt(i + 1);
int e = a[index];
a[index] = a[i];
a[i] = e;
}
}
private static int[] inversePermutation(int[] permutation) {
int n = permutation.length;
int[] inverse = new int[n];
for (int i = 0; i < n; i++) {
inverse[permutation[i]] = i;
}
return inverse;
}
@Override
public void run() {
throwExceptionIfClosed();
if (System.currentTimeMillis() - startTime < 1_000)
return;
while (!shutdown) {
int nextSegmentIndex;
try {
nextSegmentIndex = cleanupSegment();
} catch (Exception e) {
if (shutdown)
break;
throw e;
}
if (nextSegmentIndex == -1)
return;
if (nextSegmentIndex == 0) {
long currentTime = currentTime();
long mapScanTime = systemTimeIntervalBetween(
prevSegment0ScanStart, currentTime, cleanupTimeoutUnit);
Jvm.debug().on(getClass(), "Old deleted entries scan time: " + mapScanTime + " " + cleanupTimeoutUnit);
if (mapScanTime < cleanupTimeout) {
long timeToSleep = cleanupTimeoutUnit.toMillis(cleanupTimeout - mapScanTime);
if (timeToSleep > 0) {
sleepMillis(timeToSleep);
} else {
sleepNanos(cleanupTimeoutUnit.toNanos(cleanupTimeout - mapScanTime));
}
}
}
}
}
/**
* @return next segment index to cleanup, or -1 if cleanup thread should be shut down
*/
private int cleanupSegment() {<FILL_FUNCTION_BODY>}
@Override
public boolean test(ReplicableEntry e) {
throwExceptionIfClosed();
if (shutdown)
return false;
if (e instanceof MapAbsentEntry) {
long deleteTimeout = systemTimeIntervalBetween(
e.originTimestamp(), currentTime(), cleanupTimeoutUnit);
if (deleteTimeout > cleanupTimeout && !e.isChanged()) {
e.doRemoveCompletely();
removedCompletely++;
}
}
return true;
}
private void sleepMillis(long millis) {
long deadline = System.currentTimeMillis() + millis;
while (System.currentTimeMillis() < deadline && !shutdown)
LockSupport.parkUntil(cleanupSleepingHandle, deadline);
}
private void sleepNanos(long nanos) {
long deadline = System.nanoTime() + nanos;
while (System.nanoTime() < deadline && !shutdown)
LockSupport.parkNanos(cleanupSleepingHandle, deadline);
}
@Override
public void close() {
shutdown = true;
// this means blocked in sleepMillis() or sleepNanos()
if (LockSupport.getBlocker(this) == cleanupSleepingHandle)
this.interrupt(); // unblock
}
private int nextSegmentIndex(int segmentIndex) {
int permutationIndex = inverseSegmentsPermutation[segmentIndex];
int nextPermutationIndex = (permutationIndex + 1) % segments;
return segmentsPermutation[nextPermutationIndex];
}
} |
ReplicatedChronicleMap<?, ?, ?> map = mapRef.get();
if (map == null)
return -1;
int segmentIndex = map.globalMutableState().getCurrentCleanupSegmentIndex();
int nextSegmentIndex;
try (MapSegmentContext<?, ?, ?> context = map.segmentContext(segmentIndex)) {
if (segmentIndex == 0)
prevSegment0ScanStart = currentTime();
removedCompletely = 0;
if (((ReplicatedHashSegmentContext<?, ?>) context)
.forEachSegmentReplicableEntryWhile(this)) {
Jvm.debug().on(getClass(),
"Removed " + removedCompletely + " old deleted entries " +
"in the segment " + segmentIndex);
nextSegmentIndex = nextSegmentIndex(segmentIndex);
map.globalMutableState().setCurrentCleanupSegmentIndex(nextSegmentIndex);
return nextSegmentIndex;
} else {
// forEachWhile returned false => interrupted => shutdown = true
assert shutdown;
return -1;
}
}
| 1,619 | 272 | 1,891 | <methods>public void <init>() ,public void <init>(java.lang.Runnable) ,public void <init>(java.lang.String) ,public void <init>(java.lang.ThreadGroup, java.lang.Runnable) ,public void <init>(java.lang.ThreadGroup, java.lang.String) ,public void <init>(java.lang.Runnable, java.lang.String) ,public void <init>(java.lang.ThreadGroup, java.lang.Runnable, java.lang.String) ,public void <init>(java.lang.ThreadGroup, java.lang.Runnable, java.lang.String, long) ,public void <init>(java.lang.ThreadGroup, java.lang.Runnable, java.lang.String, long, boolean) ,public static int activeCount() ,public final void checkAccess() ,public int countStackFrames() ,public static native java.lang.Thread currentThread() ,public static void dumpStack() ,public static int enumerate(java.lang.Thread[]) ,public static Map<java.lang.Thread,java.lang.StackTraceElement[]> getAllStackTraces() ,public java.lang.ClassLoader getContextClassLoader() ,public static java.lang.Thread.UncaughtExceptionHandler getDefaultUncaughtExceptionHandler() ,public long getId() ,public final java.lang.String getName() ,public final int getPriority() ,public java.lang.StackTraceElement[] getStackTrace() ,public java.lang.Thread.State getState() ,public final java.lang.ThreadGroup getThreadGroup() ,public java.lang.Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() ,public static native boolean holdsLock(java.lang.Object) ,public void interrupt() ,public static boolean interrupted() ,public final boolean isAlive() ,public final boolean isDaemon() ,public boolean isInterrupted() ,public final void join() throws java.lang.InterruptedException,public final synchronized void join(long) throws java.lang.InterruptedException,public final synchronized void join(long, int) throws java.lang.InterruptedException,public static void onSpinWait() ,public final void resume() ,public void run() ,public void setContextClassLoader(java.lang.ClassLoader) ,public final void setDaemon(boolean) ,public 
static void setDefaultUncaughtExceptionHandler(java.lang.Thread.UncaughtExceptionHandler) ,public final synchronized void setName(java.lang.String) ,public final void setPriority(int) ,public void setUncaughtExceptionHandler(java.lang.Thread.UncaughtExceptionHandler) ,public static native void sleep(long) throws java.lang.InterruptedException,public static void sleep(long, int) throws java.lang.InterruptedException,public synchronized void start() ,public final void stop() ,public final void suspend() ,public java.lang.String toString() ,public static native void yield() <variables>private static final java.lang.StackTraceElement[] EMPTY_STACK_TRACE,public static final int MAX_PRIORITY,public static final int MIN_PRIORITY,public static final int NORM_PRIORITY,private volatile sun.nio.ch.Interruptible blocker,private final java.lang.Object blockerLock,private java.lang.ClassLoader contextClassLoader,private boolean daemon,private static volatile java.lang.Thread.UncaughtExceptionHandler defaultUncaughtExceptionHandler,private volatile long eetop,private java.lang.ThreadGroup group,java.lang.ThreadLocal.ThreadLocalMap inheritableThreadLocals,private java.security.AccessControlContext inheritedAccessControlContext,private volatile boolean interrupted,private volatile java.lang.String name,volatile java.lang.Object parkBlocker,private int priority,private final long stackSize,private boolean stillborn,private java.lang.Runnable target,private static int threadInitNumber,int threadLocalRandomProbe,int threadLocalRandomSecondarySeed,long threadLocalRandomSeed,java.lang.ThreadLocal.ThreadLocalMap threadLocals,private static long threadSeqNumber,private volatile int threadStatus,private final long tid,private volatile java.lang.Thread.UncaughtExceptionHandler uncaughtExceptionHandler |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/SelectedSelectionKeySet.java | SelectedSelectionKeySet | flip | class SelectedSelectionKeySet extends AbstractSet<SelectionKey> {
private SelectionKey[] keysA;
private int keysASize;
private SelectionKey[] keysB;
private int keysBSize;
private boolean isA = true;
SelectedSelectionKeySet() {
keysA = new SelectionKey[1024];
keysB = keysA.clone();
}
@Override
public boolean add(SelectionKey o) {
if (o == null) {
return false;
}
if (isA) {
int size = keysASize;
keysA[size++] = o;
keysASize = size;
if (size == keysA.length) {
doubleCapacityA();
}
} else {
int size = keysBSize;
keysB[size++] = o;
keysBSize = size;
if (size == keysB.length) {
doubleCapacityB();
}
}
return true;
}
private void doubleCapacityA() {
SelectionKey[] newKeysA = new SelectionKey[keysA.length << 1];
System.arraycopy(keysA, 0, newKeysA, 0, keysASize);
keysA = newKeysA;
}
private void doubleCapacityB() {
SelectionKey[] newKeysB = new SelectionKey[keysB.length << 1];
System.arraycopy(keysB, 0, newKeysB, 0, keysBSize);
keysB = newKeysB;
}
SelectionKey[] flip() {<FILL_FUNCTION_BODY>}
@Override
public int size() {
if (isA) {
return keysASize;
} else {
return keysBSize;
}
}
@Override
public boolean remove(Object o) {
return false;
}
@Override
public boolean contains(Object o) {
return false;
}
@NotNull
@Override
public Iterator<SelectionKey> iterator() {
throw new UnsupportedOperationException();
}
} |
if (isA) {
isA = false;
keysA[keysASize] = null;
keysBSize = 0;
return keysA;
} else {
isA = true;
keysB[keysBSize] = null;
keysASize = 0;
return keysB;
}
| 545 | 85 | 630 | <methods>public boolean equals(java.lang.Object) ,public int hashCode() ,public boolean removeAll(Collection<?>) <variables> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/channel/MapHandler.java | MapHandler | createMapHandler | class MapHandler<VALUE, REPLY> extends AbstractHandler<MapHandler<VALUE, REPLY>> {
protected MapService<VALUE, REPLY> mapService;
private String mapName;
protected MapHandler(String mapName) {
this.mapName = mapName;
}
public static <V, O> MapHandler<V, O> createMapHandler(String mapName, MapService<V, O> mapService) {<FILL_FUNCTION_BODY>}
@Override
public void run(ChronicleContext context, ChronicleChannel channel) {
try (ChronicleMap<Bytes<?>, VALUE> map = MapChannel.createMap(mapName, mapService, context)) {
REPLY REPLY = channel.methodWriter(mapService().replyClass());
mapService.map(map);
mapService.reply(REPLY);
try (AffinityLock lock = context.affinityLock()) {
channel.eventHandlerAsRunnable(mapService).run();
}
} catch (IOException ioe) {
throw Jvm.rethrow(ioe);
}
}
@Override
public ChronicleChannel asInternalChannel(ChronicleContext context, ChronicleChannelCfg channelCfg) {
return new MapChannel(mapName, mapService, context, channelCfg);
}
protected MapService<VALUE, REPLY> mapService() {
return mapService;
}
} |
MapHandler<V, O> mh = new MapHandler<>(mapName);
mh.mapService = mapService;
return mh;
| 365 | 40 | 405 | <methods>public void <init>() ,public java.lang.Boolean buffered() ,public MapHandler<VALUE,REPLY> buffered(java.lang.Boolean) <variables>private java.lang.Boolean buffered |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/channel/internal/MapChannel.java | MapChannel | createMap | class MapChannel<VALUE, REPLY> extends SimpleCloseable implements ChronicleChannel {
private static final OkHeader OK = new OkHeader();
private final String mapName;
private final MapService<VALUE, REPLY> mapService;
private final ChronicleChannelCfg channelCfg;
private final ChronicleMap<Bytes<?>, VALUE> map;
// TODO FIX this runs out of memory.
private final Wire replyWire = WireType.BINARY_LIGHT.apply(Bytes.allocateElasticOnHeap());
public MapChannel(String mapName, MapService<VALUE, REPLY> mapService, ChronicleContext context, ChronicleChannelCfg channelCfg) {
this.mapName = mapName;
this.mapService = mapService;
this.channelCfg = channelCfg;
try {
map = createMap(mapName, mapService, context);
mapService.map(map);
REPLY reply = replyWire.methodWriter(mapService.replyClass());
mapService.reply(reply);
} catch (IOException e) {
throw new IORuntimeException(e);
}
}
public static <VALUE, REPLY> ChronicleMap<Bytes<?>, VALUE> createMap(String mapName, MapService<VALUE, REPLY> mapService, ChronicleContext context) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public ChronicleChannelCfg channelCfg() {
return channelCfg;
}
@Override
public ChannelHeader headerOut() {
return OK;
}
@Override
public ChannelHeader headerIn() {
return OK;
}
@Override
public void testMessage(long now) {
throw new UnsupportedOperationException();
}
@Override
public long lastTestMessage() {
throw new UnsupportedOperationException();
}
@Override
public @NotNull DocumentContext readingDocument() {
return replyWire.readingDocument();
}
@Override
public <T> @NotNull T methodWriter(@NotNull Class<T> tClass, Class... additional) {
// if the class doesn't match it throws a ClassCastException'
return (T) mapService;
}
@Override
public DocumentContext writingDocument(boolean metaData) throws UnrecoverableTimeoutException {
throw new UnsupportedOperationException();
}
@Override
public DocumentContext acquireWritingDocument(boolean metaData) throws UnrecoverableTimeoutException {
throw new UnsupportedOperationException();
}
} |
// assume it has to already exist, but if not take a guess on sizes
final Class<VALUE> valueClass = mapService.valueClass();
final Class<Bytes<?>> bytesClass = (Class) Bytes.class;
final ChronicleMapBuilder<Bytes<?>, VALUE> builder = ChronicleMap.of(bytesClass, valueClass)
.keyMarshaller(new BytesSizedMarshaller())
.averageKeySize(32)
.averageValueSize(256)
.entries(1000000)
.putReturnsNull(true)
.removeReturnsNull(true);
if (BytesMarshallable.class.isAssignableFrom(valueClass)) {
//noinspection unchecked,rawtypes
builder.valueMarshaller(new BytesMarshallableReaderWriter<>((Class) valueClass));
} else if (Marshallable.class.isAssignableFrom(valueClass)) {
//noinspection unchecked,rawtypes
builder.valueMarshaller(new MarshallableReaderWriter<>((Class) valueClass));
}
return builder
.createPersistedTo(context.toFile(mapName + ".cm3"));
| 645 | 296 | 941 | <methods>public final void close() ,public boolean isClosed() <variables>private volatile transient boolean closed |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/data/DummyValueZeroData.java | DummyValueZeroData | getUsing | class DummyValueZeroData<V> extends AbstractData<V> {
private final Bytes zeroBytes = ZeroBytesStore.INSTANCE.bytesForRead();
@StageRef
VanillaChronicleMapHolder<?, ?, ?> mh;
@StageRef
ValueBytesInterop<V> vi;
@StageRef
CheckOnEachPublicOperation checkOnEachPublicOperation;
@Override
public RandomDataInput bytes() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return ZeroBytesStore.INSTANCE;
}
@Override
public long offset() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return 0;
}
@Override
public long size() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return Math.max(0, mh.m().valueSizeMarshaller.minStorableSize());
}
@Override
public V get() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
// Not optimized and creates garbage, because this isn't the primary
// use case. Zero data should only be used in bytes form
return getUsing(null);
}
@Override
public V getUsing(V using) {<FILL_FUNCTION_BODY>}
private IllegalStateException zeroReadException(Exception cause) {
return new IllegalStateException(mh.h().toIdentityString() +
": Most probable cause of this exception - zero bytes of\n" +
"the minimum positive encoding length, supported by the specified or default\n" +
"valueSizeMarshaller() is not correct serialized form of any value. You should\n" +
"configure defaultValueProvider() in ChronicleMapBuilder", cause);
}
} |
checkOnEachPublicOperation.checkOnEachPublicOperation();
zeroBytes.readPosition(0);
try {
return vi.valueReader.read(zeroBytes, size(), using);
} catch (Exception e) {
throw zeroReadException(e);
}
| 443 | 69 | 512 | <methods>public boolean equals(java.lang.Object) ,public int hashCode() ,public java.lang.String toString() <variables> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/data/bytes/EntryValueBytesData.java | EntryValueBytesData | innerGetUsing | class EntryValueBytesData<V> extends AbstractData<V> {
@StageRef
VanillaChronicleMapHolder<?, V, ?> mh;
@StageRef
ValueBytesInterop<V> vi;
@StageRef
SegmentStages s;
@StageRef
MapEntryStages<?, V> entry;
@StageRef
CheckOnEachPublicOperation checkOnEachPublicOperation;
@Stage("CachedEntryValue")
private V cachedEntryValue =
mh.m().valueType() == CharSequence.class ? (V) new StringBuilder() : null;
@Stage("CachedEntryValue")
private boolean cachedEntryValueRead = false;
private void initCachedEntryValue() {
cachedEntryValue = innerGetUsing(cachedEntryValue);
cachedEntryValueRead = true;
}
public boolean cachedEntryValueInit() {
return cachedEntryValueRead;
}
public void closeCachedEntryValue() {
cachedEntryValueRead = false;
}
@Override
public RandomDataInput bytes() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return s.segmentBS;
}
@Override
public long offset() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return entry.valueOffset;
}
@Override
public long size() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return entry.valueSize;
}
@Override
public V get() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return cachedEntryValue;
}
@Override
public V getUsing(V using) {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return innerGetUsing(using);
}
private V innerGetUsing(V usingValue) {<FILL_FUNCTION_BODY>}
} |
Bytes segmentBytes = s.segmentBytesForRead();
segmentBytes.readPosition(entry.valueOffset);
return vi.valueReader.read(segmentBytes, size(), usingValue);
| 482 | 50 | 532 | <methods>public boolean equals(java.lang.Object) ,public int hashCode() ,public java.lang.String toString() <variables> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/data/bytes/WrappedValueBytesData.java | WrappedValueBytesData | getUnusedWrappedValueBytesData | class WrappedValueBytesData<V> extends AbstractData<V> {
@Stage("WrappedValueBytes")
private final VanillaBytes wrappedValueBytes = VanillaBytes.vanillaBytes();
@StageRef
ValueBytesInterop<V> vi;
@StageRef
CheckOnEachPublicOperation checkOnEachPublicOperation;
private WrappedValueBytesData<V> next;
@Stage("WrappedValueBytesStore")
private BytesStore wrappedValueBytesStore;
@Stage("WrappedValueBytesStore")
private long wrappedValueBytesOffset;
@Stage("WrappedValueBytesStore")
private long wrappedValueBytesSize;
@Stage("WrappedValueBytes")
private boolean wrappedValueBytesUsed = false;
@Stage("CachedWrappedValue")
private V cachedWrappedValue;
@Stage("CachedWrappedValue")
private boolean cachedWrappedValueRead = false;
boolean nextInit() {
return true;
}
void closeNext() {
// do nothing
}
@Stage("Next")
public WrappedValueBytesData<V> getUnusedWrappedValueBytesData() {<FILL_FUNCTION_BODY>}
boolean wrappedValueBytesStoreInit() {
return wrappedValueBytesStore != null;
}
public void initWrappedValueBytesStore(BytesStore bytesStore, long offset, long size) {
wrappedValueBytesStore = bytesStore;
wrappedValueBytesOffset = offset;
wrappedValueBytesSize = size;
}
void closeWrappedValueBytesStore() {
wrappedValueBytesStore = null;
if (next != null)
next.closeWrappedValueBytesStore();
}
boolean wrappedValueBytesInit() {
return wrappedValueBytesUsed;
}
void initWrappedValueBytes() {
wrappedValueBytes.bytesStore(
wrappedValueBytesStore, wrappedValueBytesOffset, wrappedValueBytesSize);
wrappedValueBytesUsed = true;
}
void closeWrappedValueBytes() {
wrappedValueBytes.bytesStore(BytesStore.empty(), 0, 0);
wrappedValueBytesUsed = false;
}
private void initCachedWrappedValue() {
cachedWrappedValue = innerGetUsing(cachedWrappedValue);
cachedWrappedValueRead = true;
}
@Override
public RandomDataInput bytes() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return wrappedValueBytes.bytesStore();
}
@Override
public long offset() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return wrappedValueBytesOffset;
}
@Override
public long size() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return wrappedValueBytesSize;
}
@Override
public V get() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return cachedWrappedValue;
}
@Override
public V getUsing(V using) {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return innerGetUsing(using);
}
private V innerGetUsing(V usingValue) {
wrappedValueBytes.readPosition(wrappedValueBytesOffset);
return vi.valueReader.read(wrappedValueBytes, wrappedValueBytesSize, usingValue);
}
} |
if (!wrappedValueBytesStoreInit())
return this;
if (next == null)
next = new WrappedValueBytesData<>();
return next.getUnusedWrappedValueBytesData();
| 834 | 53 | 887 | <methods>public boolean equals(java.lang.Object) ,public int hashCode() ,public java.lang.String toString() <variables> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/data/instance/WrappedValueInstanceDataHolder.java | WrappedValueInstanceDataHolder | getUnusedWrappedValueHolder | class WrappedValueInstanceDataHolder<V> {
public Data<V> wrappedData = null;
@StageRef
VanillaChronicleMapHolder<?, V, ?> mh;
private final DataAccess<V> wrappedValueDataAccess = mh.m().valueDataAccess.copy();
private WrappedValueInstanceDataHolder<V> next;
private V value;
boolean nextInit() {
return true;
}
void closeNext() {
// do nothing
}
@Stage("Next")
public WrappedValueInstanceDataHolder<V> getUnusedWrappedValueHolder() {<FILL_FUNCTION_BODY>}
public boolean valueInit() {
return value != null;
}
public void initValue(V value) {
mh.m().checkValue(value);
this.value = value;
}
public void closeValue() {
value = null;
if (next != null)
next.closeValue();
}
private void initWrappedData() {
wrappedData = wrappedValueDataAccess.getData(value);
}
private void closeWrappedData() {
wrappedData = null;
wrappedValueDataAccess.uninit();
}
} |
if (!valueInit())
return this;
if (next == null)
next = new WrappedValueInstanceDataHolder<>();
return next.getUnusedWrappedValueHolder();
| 319 | 49 | 368 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/entry/ReplicatedMapEntryStages.java | ReplicatedMapEntryStages | updatedReplicationStateOnPresentEntry | class ReplicatedMapEntryStages<K, V> extends MapEntryStages<K, V>
implements MapReplicableEntry<K, V> {
@StageRef
ReplicatedChronicleMapHolder<?, ?, ?> mh;
@StageRef
ReplicationUpdate ru;
@Stage("ReplicationState")
long replicationBytesOffset = -1;
void initReplicationState() {
replicationBytesOffset = keyEnd();
}
void updateReplicationState(byte identifier, long timestamp) {
initDelayedUpdateChecksum(true);
Bytes segmentBytes = s.segmentBytesForWrite();
segmentBytes.writePosition(replicationBytesOffset);
segmentBytes.writeLong(timestamp);
segmentBytes.writeByte(identifier);
}
private long timestampOffset() {
return replicationBytesOffset;
}
public long timestamp() {
return s.segmentBS.readLong(replicationBytesOffset);
}
private long identifierOffset() {
return replicationBytesOffset + 8L;
}
byte identifier() {
return s.segmentBS.readByte(identifierOffset());
}
private long entryDeletedOffset() {
return replicationBytesOffset + 9L;
}
@Override
public boolean entryDeleted() {
return s.segmentBS.readBoolean(entryDeletedOffset());
}
public void writeEntryPresent() {
s.segmentBS.writeBoolean(entryDeletedOffset(), false);
}
public void writeEntryDeleted() {
s.segmentBS.writeBoolean(entryDeletedOffset(), true);
}
@Override
public byte originIdentifier() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return identifier();
}
@Override
public long originTimestamp() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return timestamp();
}
@Override
long countValueSizeOffset() {
return super.countValueSizeOffset() + ADDITIONAL_ENTRY_BYTES;
}
@Override
public void updateOrigin(byte newIdentifier, long newTimestamp) {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerWriteLock.lock();
updateReplicationState(newIdentifier, newTimestamp);
}
@Override
public void dropChanged() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerUpdateLock.lock();
ru.dropChange();
}
@Override
public void dropChangedFor(byte remoteIdentifier) {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerUpdateLock.lock();
ru.dropChangeFor(remoteIdentifier);
}
@Override
public void raiseChanged() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerUpdateLock.lock();
ru.raiseChange();
}
@Override
public void raiseChangedFor(byte remoteIdentifier) {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerUpdateLock.lock();
ru.raiseChangeFor(remoteIdentifier);
}
@Override
public void raiseChangedForAllExcept(byte remoteIdentifier) {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerUpdateLock.lock();
ru.raiseChangeForAllExcept(remoteIdentifier);
}
@Override
public boolean isChanged() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerReadLock.lock();
return ru.changed();
}
public void updatedReplicationStateOnPresentEntry() {<FILL_FUNCTION_BODY>}
public void updatedReplicationStateOnAbsentEntry() {
if (!ru.replicationUpdateInit()) {
s.innerWriteLock.lock();
updateReplicationState(mh.m().identifier(), currentTime());
}
}
@Override
protected void relocation(Data<V> newValue, long newEntrySize) {
long oldPos = pos;
long oldTierIndex = s.tierIndex;
super.relocation(newValue, newEntrySize);
ru.moveChange(oldTierIndex, oldPos, pos);
}
@Override
long sizeOfEverythingBeforeValue(long keySize, long valueSize) {
return super.sizeOfEverythingBeforeValue(keySize, valueSize) + ADDITIONAL_ENTRY_BYTES;
}
} |
if (!ru.replicationUpdateInit()) {
s.innerWriteLock.lock();
long timestamp = Math.max(timestamp() + 1, currentTime());
updateReplicationState(mh.m().identifier(), timestamp);
}
| 1,123 | 61 | 1,184 | <methods>public non-sealed void <init>() ,public boolean entryDeleted() ,public long entryEnd() ,public final long entrySize(long, long) ,public final void freeExtraAllocatedChunks() ,public void initValue(Data<?>) ,public void innerDefaultReplaceValue(Data<V>) ,public long innerEntrySize(long, long) ,public long newEntrySize(Data<V>, long, long) ,public long newSizeOfEverythingBeforeValue(Data<V>) ,public Data<V> value() ,public void writeValue(Data<?>) <variables>public net.openhft.chronicle.hash.impl.stage.entry.AllocatedChunks allocatedChunks,public EntryValueBytesData<V> entryValue,KeySearch<K> ks,public VanillaChronicleMapHolder<?,?,?> mh,public long valueOffset,public long valueSize,public long valueSizeOffset |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/input/ReplicatedInput.java | ReplicatedInput | processReplicatedEvent | class ReplicatedInput<K, V, R> implements RemoteOperationContext<K>,
MapRemoteQueryContext<K, V, R>, Replica.QueryContext<K, V> {
@StageRef
CheckOnEachPublicOperation checkOnEachPublicOperation;
@StageRef
ReplicatedChronicleMapHolder<K, V, R> mh;
@StageRef
ReplicationUpdate<K> ru;
@StageRef
ReplicatedMapQuery<K, V, ?> q;
@StageRef
SegmentStages s;
@StageRef
DummyValueZeroData<V> dummyValue;
@Override
public Data<V> dummyZeroValue() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return dummyValue;
}
public void processReplicatedEvent(byte remoteNodeIdentifier, Bytes replicatedInputBytes) {<FILL_FUNCTION_BODY>}
@Override
public void remotePut(
Data<V> newValue,
byte remoteEntryIdentifier, long remoteEntryTimestamp, byte remoteNodeIdentifier) {
ru.initReplicationUpdate(remoteEntryIdentifier, remoteEntryTimestamp, remoteNodeIdentifier);
s.innerUpdateLock.lock();
mh.m().remoteOperations.put(this, newValue);
}
@Override
public void remoteRemove(
byte remoteEntryIdentifier, long remoteEntryTimestamp, byte remoteNodeIdentifier) {
ru.initReplicationUpdate(remoteEntryIdentifier, remoteEntryTimestamp, remoteNodeIdentifier);
s.innerWriteLock.lock();
mh.m().remoteOperations.remove(this);
}
} |
long timestamp = replicatedInputBytes.readStopBit();
byte identifier = replicatedInputBytes.readByte();
ru.initReplicationUpdate(identifier, timestamp, remoteNodeIdentifier);
boolean isDeleted = replicatedInputBytes.readBoolean();
long keySize = mh.m().keySizeMarshaller.readSize(replicatedInputBytes);
long keyOffset = replicatedInputBytes.readPosition();
q.initInputKey(q.getInputKeyBytesAsData(replicatedInputBytes, keyOffset, keySize));
replicatedInputBytes.readSkip(keySize);
if (isDeleted) {
s.innerUpdateLock.lock();
mh.m().remoteOperations.remove(this);
} else {
long valueSize = mh.m().valueSizeMarshaller.readSize(replicatedInputBytes);
long valueOffset = replicatedInputBytes.readPosition();
Data<V> value = q.wrapValueBytesAsData(replicatedInputBytes, valueOffset, valueSize);
replicatedInputBytes.readSkip(valueSize);
s.innerWriteLock.lock();
mh.m().remoteOperations.put(this, value);
}
| 414 | 296 | 710 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/iter/MapSegmentIteration.java | MapSegmentIteration | doReplaceValue | class MapSegmentIteration<K, V, R> extends HashSegmentIteration<K, MapEntry<K, V>>
implements MapEntry<K, V>, IterationContext<K, V, R> {
@StageRef
MapEntryStages<K, V> entry;
@StageRef
WrappedValueInstanceDataHolder<V> wrappedValueInstanceDataHolder;
@StageRef
WrappedValueInstanceDataHolderAccess<K, V, ?> wrappedValueInstanceDataHolderAccess;
@Override
public void hookAfterEachIteration() {
throwExceptionIfClosed();
wrappedValueInstanceDataHolder.closeValue();
}
@Override
public void doReplaceValue(Data<V> newValue) {<FILL_FUNCTION_BODY>}
@NotNull
@Override
public WrappedValueInstanceDataHolderAccess<K, V, ?> context() {
return wrappedValueInstanceDataHolderAccess;
}
} |
throwExceptionIfClosed();
checkOnEachPublicOperation.checkOnEachPublicOperation();
try {
entry.innerDefaultReplaceValue(newValue);
} finally {
s.innerWriteLock.unlock();
}
| 238 | 60 | 298 | <methods>public non-sealed void <init>() ,public void checkEntryNotRemovedOnThisIteration() ,public void doRemove() ,public java.lang.Object entryForIteration() ,public void forEachSegmentEntry(Consumer<? super MapEntry<K,V>>) ,public boolean forEachSegmentEntryWhile(Predicate<? super MapEntry<K,V>>) ,public boolean forEachTierEntryWhile(Predicate<? super T>, int, long, long) ,public abstract boolean hashLookupEntryInit() ,public void hookAfterEachIteration() ,public void initHashLookupEntry(long) ,public boolean innerForEachSegmentEntryWhile(Predicate<? super T>) ,public void iterationRemove() ,public boolean shouldTestEntry() ,public long tierEntriesForIteration() <variables>public net.openhft.chronicle.hash.impl.stage.hash.CheckOnEachPublicOperation checkOnEachPublicOperation,HashEntryStages<K> e,public boolean entryRemovedOnThisIteration,public long hashLookupEntry,VanillaChronicleHashHolder<?> hh,protected net.openhft.chronicle.hash.impl.stage.entry.HashLookupPos hlp,public net.openhft.chronicle.hash.impl.stage.iter.IterationSegmentStages s |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/iter/ReplicatedMapSegmentIteration.java | ReplicatedMapSegmentIteration | doInsert | class ReplicatedMapSegmentIteration<K, V, R> extends MapSegmentIteration<K, V, R>
implements ReplicatedIterationContext<K, V, R>, ReplicableEntry,
ReplicatedHashSegmentContext<K, MapEntry<K, V>> {
@StageRef
VanillaChronicleMapHolder<K, V, R> mh;
@StageRef
ReplicatedMapEntryStages<K, V> e;
@StageRef
ReplicationUpdate<K> ru;
@StageRef
DummyValueZeroData<V> dummyValue;
@StageRef
ReplicatedMapAbsentDelegatingForIteration<K, V> absentEntryDelegating;
@StageRef
ReplicatedMapEntryDelegating<K, V> entryDelegating;
EntriesToTest entriesToTest = null;
void initEntriesToTest(EntriesToTest entriesToTest) {
this.entriesToTest = entriesToTest;
}
@Override
public boolean shouldTestEntry() {
throwExceptionIfClosed();
return entriesToTest == ALL || !e.entryDeleted();
}
@Override
public Object entryForIteration() {
throwExceptionIfClosed();
return !e.entryDeleted() ? entryDelegating : absentEntryDelegating;
}
@Override
public long tierEntriesForIteration() {
throwExceptionIfClosed();
return entriesToTest == ALL ? s.tierEntries() : s.tierEntries() - s.tierDeleted();
}
@Override
public void doReplaceValue(Data<V> newValue) {
throwExceptionIfClosed();
checkOnEachPublicOperation.checkOnEachPublicOperation();
try {
entry.innerDefaultReplaceValue(newValue);
e.updatedReplicationStateOnPresentEntry();
ru.updateChange();
} finally {
s.innerWriteLock.unlock();
}
}
@Override
public boolean forEachSegmentEntryWhile(Predicate<? super MapEntry<K, V>> predicate) {
throwExceptionIfClosed();
checkOnEachPublicOperation.checkOnEachPublicOperation();
initEntriesToTest(PRESENT);
s.innerUpdateLock.lock();
return innerForEachSegmentEntryWhile(predicate);
}
@Override
public boolean forEachSegmentReplicableEntryWhile(
Predicate<? super ReplicableEntry> predicate) {
throwExceptionIfClosed();
checkOnEachPublicOperation.checkOnEachPublicOperation();
initEntriesToTest(ALL);
s.innerUpdateLock.lock();
return innerForEachSegmentEntryWhile(predicate);
}
@Override
public void forEachSegmentReplicableEntry(Consumer<? super ReplicableEntry> action) {
throwExceptionIfClosed();
forEachSegmentReplicableEntryWhile(e -> {
action.accept(e);
return true;
});
}
@Override
public void doRemove() {
throwExceptionIfClosed();
checkOnEachPublicOperation.checkOnEachPublicOperation();
try {
if (e.valueSize > dummyValue.size())
e.innerDefaultReplaceValue(dummyValue);
e.updatedReplicationStateOnPresentEntry();
e.writeEntryDeleted();
ru.updateChange();
s.tierDeleted(s.tierDeleted() + 1);
} finally {
s.innerWriteLock.unlock();
}
initEntryRemovedOnThisIteration(true);
}
@Override
public void doRemoveCompletely() {
throwExceptionIfClosed();
boolean wasDeleted = e.entryDeleted();
super.doRemove();
ru.dropChange();
if (wasDeleted)
s.tierDeleted(s.tierDeleted() - 1);
}
public void doInsert(Data<V> value) {
throwExceptionIfClosed();
checkOnEachPublicOperation.checkOnEachPublicOperation();
if (e.entryDeleted()) {
try {
s.tierDeleted(s.tierDeleted() - 1);
e.innerDefaultReplaceValue(value);
s.incrementModCount();
e.writeEntryPresent();
e.updatedReplicationStateOnPresentEntry();
ru.updateChange();
} finally {
s.innerWriteLock.unlock();
}
} else {
throw new IllegalStateException(mh.h().toIdentityString() +
": Entry is present in the map when doInsert() is called");
}
}
public void doInsert() {<FILL_FUNCTION_BODY>}
enum EntriesToTest {PRESENT, ALL}
} |
throwExceptionIfClosed();
if (mh.set() == null)
throw new IllegalStateException(mh.h().toIdentityString() +
": Called SetAbsentEntry.doInsert() from Map context");
doInsert((Data<V>) DummyValueData.INSTANCE);
| 1,219 | 78 | 1,297 | <methods>public non-sealed void <init>() ,public WrappedValueInstanceDataHolderAccess<K,V,?> context() ,public void doReplaceValue(Data<V>) ,public void hookAfterEachIteration() <variables>MapEntryStages<K,V> entry,WrappedValueInstanceDataHolder<V> wrappedValueInstanceDataHolder,WrappedValueInstanceDataHolderAccess<K,V,?> wrappedValueInstanceDataHolderAccess |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/iter/ReplicatedTierRecovery.java | ReplicatedTierRecovery | cleanupModificationIterationBits | class ReplicatedTierRecovery extends TierRecovery {
@StageRef
ReplicatedChronicleMapHolder<?, ?, ?> rh;
@StageRef
SegmentStages s;
@StageRef
ReplicatedMapEntryStages<?, ?> e;
@Override
public void removeDuplicatesInSegment(
ChronicleHashCorruption.Listener corruptionListener,
ChronicleHashCorruptionImpl corruption) {
super.removeDuplicatesInSegment(corruptionListener, corruption);
recoverTierDeleted(corruptionListener, corruption);
cleanupModificationIterationBits();
}
private void recoverTierDeleted(
ChronicleHashCorruption.Listener corruptionListener,
ChronicleHashCorruptionImpl corruption) {
VanillaChronicleHash<?, ?, ?, ?> h = rh.h();
CompactOffHeapLinearHashTable hl = h.hashLookup;
long hlAddr = s.tierBaseAddr;
long deleted = 0;
long hlPos = 0;
do {
long hlEntry = hl.readEntry(hlAddr, hlPos);
if (!hl.empty(hlEntry)) {
e.readExistingEntry(hl.value(hlEntry));
if (e.entryDeleted()) {
deleted++;
}
}
hlPos = hl.step(hlPos);
} while (hlPos != 0);
if (s.tierDeleted() != deleted) {
long finalDeleted = deleted;
report(corruptionListener, corruption, s.segmentIndex, () ->
format("wrong deleted counter for tier with index {}, stored: {}, should be: {}",
s.tierIndex, s.tierDeleted(), finalDeleted)
);
s.tierDeleted(deleted);
}
}
private void cleanupModificationIterationBits() {<FILL_FUNCTION_BODY>}
} |
ReplicatedChronicleMap<?, ?, ?> m = rh.m();
ReplicatedChronicleMap<?, ?, ?>.ModificationIterator[] its =
m.acquireAllModificationIterators();
ReusableBitSet freeList = s.freeList;
for (long pos = 0; pos < m.actualChunksPerSegmentTier; ) {
long nextPos = freeList.nextSetBit(pos);
if (nextPos > pos) {
for (ReplicatedChronicleMap<?, ?, ?>.ModificationIterator it : its) {
it.clearRange0(s.tierIndex, pos, nextPos);
}
}
if (nextPos > 0) {
e.readExistingEntry(nextPos);
if (e.entrySizeInChunks > 1) {
for (ReplicatedChronicleMap<?, ?, ?>.ModificationIterator it : its) {
it.clearRange0(s.tierIndex, nextPos + 1, nextPos + e.entrySizeInChunks);
}
}
pos = nextPos + e.entrySizeInChunks;
} else {
for (ReplicatedChronicleMap<?, ?, ?>.ModificationIterator it : its) {
it.clearRange0(s.tierIndex, pos, m.actualChunksPerSegmentTier);
}
break;
}
}
| 506 | 356 | 862 | <methods>public non-sealed void <init>() ,public int recoverTier(int, net.openhft.chronicle.hash.ChronicleHashCorruption.Listener, net.openhft.chronicle.map.ChronicleHashCorruptionImpl) ,public void removeDuplicatesInSegment(net.openhft.chronicle.hash.ChronicleHashCorruption.Listener, net.openhft.chronicle.map.ChronicleHashCorruptionImpl) <variables>MapEntryStages<?,?> e,net.openhft.chronicle.hash.impl.stage.iter.IterationKeyHashCode khc,VanillaChronicleMapHolder<?,?,?> mh,net.openhft.chronicle.hash.impl.stage.entry.SegmentStages s |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/map/ReplicatedChronicleMapHolderImpl.java | ReplicatedChronicleMapHolderImpl | initMap | class ReplicatedChronicleMapHolderImpl<K, V, R>
extends Chaining
implements ReplicatedChronicleMapHolder<K, V, R> {
@Stage("Map")
private ReplicatedChronicleMap<K, V, R> m = null;
public ReplicatedChronicleMapHolderImpl(VanillaChronicleMap map) {
super(map);
}
public ReplicatedChronicleMapHolderImpl(
ChainingInterface rootContextInThisThread, VanillaChronicleMap map) {
super(rootContextInThisThread, map);
}
@Override
public void initMap(VanillaChronicleMap map) {<FILL_FUNCTION_BODY>}
@Override
public ReplicatedChronicleMap<K, V, R> m() {
return m;
}
@Override
public VanillaChronicleHash<K, ?, ?, ?> h() {
return m;
}
@Override
public ChronicleMap<K, V> map() {
return m;
}
@Override
public ChronicleSet<K> set() {
return m.chronicleSet;
}
public ChronicleHash<K, ?, ?, ?> hash() {
return set() != null ? set() : map();
}
} |
// alternative to this "unsafe" casting approach is proper generalization
// of Chaining/ChainingInterface, but this causes issues with current version
// of stage-compiler.
// TODO generalize Chaining with <M extends VanillaCM> when stage-compiler is improved.
//noinspection unchecked
m = (ReplicatedChronicleMap<K, V, R>) map;
| 350 | 97 | 447 | <methods>public void <init>(VanillaChronicleMap#RAW) ,public void <init>(net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface, VanillaChronicleMap#RAW) ,public T contextAtIndexInChain(int) ,public T getContext(Class<? extends T>, BiFunction<net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface,VanillaChronicleMap#RAW,T>, VanillaChronicleMap#RAW) ,public List<net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface> getContextChain() ,public abstract void initMap(VanillaChronicleMap#RAW) ,public void initUsed(boolean, VanillaChronicleMap#RAW) ,public boolean usedInit() <variables>public final non-sealed List<net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface> contextChain,private boolean firstContextLockedInThisThread,public final non-sealed int indexInContextChain,public final non-sealed net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface rootContextInThisThread,public boolean used |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/map/VanillaChronicleMapHolderImpl.java | VanillaChronicleMapHolderImpl | initMap | class VanillaChronicleMapHolderImpl<K, V, R>
extends Chaining
implements VanillaChronicleMapHolder<K, V, R> {
@Stage("Map")
private VanillaChronicleMap<K, V, R> m = null;
public VanillaChronicleMapHolderImpl(VanillaChronicleMap map) {
super(map);
}
public VanillaChronicleMapHolderImpl(
ChainingInterface rootContextInThisThread, VanillaChronicleMap map) {
super(rootContextInThisThread, map);
}
@Override
public void initMap(VanillaChronicleMap map) {<FILL_FUNCTION_BODY>}
@Override
public VanillaChronicleMap<K, V, R> m() {
return m;
}
@Override
public VanillaChronicleHash<K, ?, ?, ?> h() {
return m;
}
@Override
public ChronicleMap<K, V> map() {
return m;
}
@Override
public ChronicleSet<K> set() {
return m.chronicleSet;
}
public ChronicleHash<K, ?, ?, ?> hash() {
return set() != null ? set() : map();
}
} |
// alternative to this "unsafe" casting approach is proper generalization
// of Chaining/ChainingInterface, but this causes issues with current version
// of stage-compiler.
// TODO generalize Chaining with <M extends VanillaCM> when stage-compiler is improved.
//noinspection unchecked
m = map;
| 344 | 81 | 425 | <methods>public void <init>(VanillaChronicleMap#RAW) ,public void <init>(net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface, VanillaChronicleMap#RAW) ,public T contextAtIndexInChain(int) ,public T getContext(Class<? extends T>, BiFunction<net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface,VanillaChronicleMap#RAW,T>, VanillaChronicleMap#RAW) ,public List<net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface> getContextChain() ,public abstract void initMap(VanillaChronicleMap#RAW) ,public void initUsed(boolean, VanillaChronicleMap#RAW) ,public boolean usedInit() <variables>public final non-sealed List<net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface> contextChain,private boolean firstContextLockedInThisThread,public final non-sealed int indexInContextChain,public final non-sealed net.openhft.chronicle.hash.impl.stage.hash.ChainingInterface rootContextInThisThread,public boolean used |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/map/WrappedValueBytesDataAccess.java | WrappedValueBytesDataAccess | wrapValueBytesAsData | class WrappedValueBytesDataAccess<K, V, R> implements MapContext<K, V, R> {
@StageRef
CheckOnEachPublicOperation checkOnEachPublicOperation;
@StageRef
WrappedValueBytesData<V> wrappedValueBytesData;
@Override
public Data<V> wrapValueBytesAsData(BytesStore bytesStore, long offset, long size) {<FILL_FUNCTION_BODY>}
} |
Objects.requireNonNull(bytesStore);
checkOnEachPublicOperation.checkOnEachPublicOperation();
WrappedValueBytesData<V> wrapped = this.wrappedValueBytesData;
wrapped = wrapped.getUnusedWrappedValueBytesData();
wrapped.initWrappedValueBytesStore(bytesStore, offset, size);
return wrapped;
| 111 | 87 | 198 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/map/WrappedValueInstanceDataHolderAccess.java | WrappedValueInstanceDataHolderAccess | wrapValueAsData | class WrappedValueInstanceDataHolderAccess<K, V, R>
implements MapContext<K, V, R>, SetContext<K, R> {
@StageRef
CheckOnEachPublicOperation checkOnEachPublicOperation;
@StageRef
WrappedValueInstanceDataHolder<V> wrappedValueInstanceDataHolder;
@Override
public Data<V> wrapValueAsData(V value) {<FILL_FUNCTION_BODY>}
} |
checkOnEachPublicOperation.checkOnEachPublicOperation();
WrappedValueInstanceDataHolder<V> wrapped = this.wrappedValueInstanceDataHolder;
wrapped = wrapped.getUnusedWrappedValueHolder();
wrapped.initValue(value);
return wrapped.wrappedData;
| 115 | 71 | 186 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/query/MapAbsent.java | MapAbsent | doInsert | class MapAbsent<K, V> implements Absent<K, V> {
@StageRef
public KeySearch<K> ks;
@StageRef
public HashLookupSearch hashLookupSearch;
@StageRef
public CheckOnEachPublicOperation checkOnEachPublicOperation;
@StageRef
public SegmentStages s;
@StageRef
MapQuery<K, V, ?> q;
@StageRef
MapEntryStages<K, V> e;
@StageRef
VanillaChronicleMapHolder<K, V, ?> mh;
void putEntry(Data<V> value) {
assert ks.searchStateAbsent();
long entrySize = e.entrySize(ks.inputKey.size(), value.size());
q.allocatedChunks.initEntryAndKey(entrySize);
e.initValue(value);
e.freeExtraAllocatedChunks();
hashLookupSearch.putNewVolatile(e.pos);
}
@NotNull
@Override
public MapQuery<K, V, ?> context() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return q;
}
@NotNull
@Override
public Data<K> absentKey() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return ks.inputKey;
}
@Override
public void doInsert(Data<V> value) {<FILL_FUNCTION_BODY>}
@Override
public void doInsert() {
if (mh.set() == null)
throw new IllegalStateException(mh.h().toIdentityString() +
": Called SetAbsentEntry.doInsert() from Map context");
//noinspection unchecked
doInsert((Data<V>) DummyValueData.INSTANCE);
}
} |
q.putPrefix();
if (!q.entryPresent()) {
putEntry(value);
s.incrementModCount();
ks.setSearchState(PRESENT);
q.initPresenceOfEntry(EntryPresence.PRESENT);
} else {
throw new IllegalStateException(mh.h().toIdentityString() +
": Entry is present in the map when doInsert() is called");
}
| 471 | 110 | 581 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/query/MapQuery.java | MapQuery | doReplaceValue | class MapQuery<K, V, R> extends HashQuery<K>
implements MapEntry<K, V>, ExternalMapQueryContext<K, V, R>,
ExternalSetQueryContext<K, R>, QueryContextInterface<K, V, R>, MapAndSetContext<K, V, R> {
@StageRef
public AcquireHandle<K, V> acquireHandle;
@StageRef
public DefaultReturnValue<V> defaultReturnValue;
@StageRef
public UsingReturnValue<V> usingReturnValue;
@StageRef
public MapAbsent<K, V> absent;
@StageRef
VanillaChronicleMapHolder<K, V, R> mh;
final DataAccess<V> innerInputValueDataAccess = mh.m().valueDataAccess.copy();
@StageRef
MapEntryStages<K, V> e;
@StageRef
SearchAllocatedChunks allocatedChunks;
@StageRef
KeySearch<K> ks;
@StageRef
InputKeyBytesData<K> inputKeyBytesData;
@Stage("InputValueDataAccess")
private boolean inputValueDataAccessInitialized = false;
void initInputValueDataAccess() {
inputValueDataAccessInitialized = true;
}
void closeInputValueDataAccess() {
innerInputValueDataAccess.uninit();
inputValueDataAccessInitialized = false;
}
@Override
public DataAccess<V> inputValueDataAccess() {
initInputValueDataAccess();
return innerInputValueDataAccess;
}
@Override
public MapQuery<K, V, R> entry() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return entryPresent() ? this : null;
}
@Nullable
@Override
public Absent<K, V> absentEntry() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return entryPresent() ? null : absent;
}
protected void putPrefix() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
if (!s.innerUpdateLock.isHeldByCurrentThread())
s.innerUpdateLock.lock();
if (s.nestedContextsLockedOnSameSegment &&
s.rootContextLockedOnThisSegment.latestSameThreadSegmentModCount() !=
s.contextModCount) {
if (hlp.hashLookupPosInit() && ks.searchStateAbsent())
hlp.closeHashLookupPos();
}
}
@Override
public void doReplaceValue(Data<V> newValue) {<FILL_FUNCTION_BODY>}
@NotNull
@Override
public MapQuery<K, V, R> context() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return this;
}
@Override
public Data<K> getInputKeyBytesAsData(BytesStore bytesStore, long offset, long size) {
inputKeyBytesData.initInputKeyBytesStore(bytesStore, offset, size);
return inputKeyBytesData;
}
} |
putPrefix();
if (entryPresent()) {
e.innerDefaultReplaceValue(newValue);
s.incrementModCount();
ks.setSearchState(PRESENT);
initPresenceOfEntry(EntryPresence.PRESENT);
} else {
throw new IllegalStateException(mh.h().toIdentityString() +
": Entry is absent in the map when doReplaceValue() is called");
}
| 778 | 110 | 888 | <methods>public non-sealed void <init>() ,public void doRemove() ,public void dropSearchIfNestedContextsAndPresentHashLookupSlotCheckFailed() ,public boolean entryPresent() ,public void initPresenceOfEntry(net.openhft.chronicle.hash.impl.stage.query.HashQuery.EntryPresence) ,public DataAccess<K> inputKeyDataAccess() ,public Data<K> queriedKey() <variables>public net.openhft.chronicle.hash.impl.stage.hash.CheckOnEachPublicOperation checkOnEachPublicOperation,public HashEntryStages<K> entry,private net.openhft.chronicle.hash.impl.stage.query.HashQuery.EntryPresence entryPresence,public net.openhft.chronicle.hash.impl.stage.entry.HashLookupSearch hashLookupSearch,public VanillaChronicleHashHolder<K> hh,public net.openhft.chronicle.hash.impl.stage.entry.HashLookupPos hlp,final DataAccess<K> innerInputKeyDataAccess,private boolean inputKeyDataAccessInitialized,public KeySearch<K> ks,public net.openhft.chronicle.hash.impl.stage.entry.SegmentStages s |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/query/ReplicatedMapAbsent.java | ReplicatedMapAbsent | doInsert | class ReplicatedMapAbsent<K, V> extends MapAbsent<K, V> {
@StageRef
MapQuery<K, V, ?> q;
@StageRef
ReplicatedMapEntryStages<K, V> e;
@StageRef
ReplicationUpdate<K> ru;
@NotNull
@Override
public Data<K> absentKey() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return e.entryKey;
}
@Override
public void doInsert(Data<V> value) {<FILL_FUNCTION_BODY>}
} |
q.putPrefix();
if (!q.entryPresent()) {
if (!ks.searchStatePresent()) {
putEntry(value);
e.updatedReplicationStateOnAbsentEntry();
ks.setSearchState(PRESENT);
q.initPresenceOfEntry(EntryPresence.PRESENT);
} else {
s.tierDeleted(s.tierDeleted() - 1);
e.innerDefaultReplaceValue(value);
e.updatedReplicationStateOnPresentEntry();
}
s.incrementModCount();
e.writeEntryPresent();
ru.updateChange();
} else {
throw new IllegalStateException(mh.h().toIdentityString() +
": Entry is present in the map when doInsert() is called");
}
| 159 | 200 | 359 | <methods>public non-sealed void <init>() ,public Data<K> absentKey() ,public MapQuery<K,V,?> context() ,public void doInsert(Data<V>) ,public void doInsert() <variables>public net.openhft.chronicle.hash.impl.stage.hash.CheckOnEachPublicOperation checkOnEachPublicOperation,MapEntryStages<K,V> e,public net.openhft.chronicle.hash.impl.stage.entry.HashLookupSearch hashLookupSearch,public KeySearch<K> ks,VanillaChronicleMapHolder<K,V,?> mh,MapQuery<K,V,?> q,public net.openhft.chronicle.hash.impl.stage.entry.SegmentStages s |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/query/ReplicatedMapQuery.java | ReplicatedMapQuery | absentEntry | class ReplicatedMapQuery<K, V, R> extends MapQuery<K, V, R>
implements MapRemoteQueryContext<K, V, R>, SetRemoteQueryContext<K, R>,
ReplicableEntry, MapReplicableEntry<K, V>, SetReplicableEntry<K> {
@StageRef
ReplicatedMapEntryStages<K, V> e;
@StageRef
ReplicationUpdate ru;
@StageRef
ReplicatedMapAbsentDelegating<K, V> absentDelegating;
@StageRef
DummyValueZeroData<V> dummyValue;
@Nullable
@Override
public Absent<K, V> absentEntry() {<FILL_FUNCTION_BODY>}
@Override
public boolean entryPresent() {
return super.entryPresent() && !e.entryDeleted();
}
@Override
public ReplicatedMapQuery<K, V, R> entry() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return entryPresent() ? this : null;
}
@Override
public void doRemove() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
s.innerUpdateLock.lock();
if (entryPresent()) {
if (e.valueSize > dummyValue.size())
e.innerDefaultReplaceValue(dummyValue);
e.updatedReplicationStateOnPresentEntry();
e.writeEntryDeleted();
ru.updateChange();
s.tierDeleted(s.tierDeleted() + 1);
} else {
throw new IllegalStateException(mh.h().toIdentityString() +
": Entry is absent in the map when doRemove() is called");
}
}
@Override
public void doRemoveCompletely() {
boolean wasDeleted = e.entryDeleted();
super.doRemove();
ru.dropChange();
if (wasDeleted)
s.tierDeleted(s.tierDeleted() - 1L);
}
@Override
public void doReplaceValue(Data<V> newValue) {
super.doReplaceValue(newValue);
e.updatedReplicationStateOnPresentEntry();
ru.updateChange();
}
} |
checkOnEachPublicOperation.checkOnEachPublicOperation();
if (entryPresent()) {
return null;
} else {
if (!ks.searchStatePresent()) {
return absentDelegating;
} else {
assert e.entryDeleted();
return absent;
}
}
| 575 | 79 | 654 | <methods>public non-sealed void <init>() ,public Absent<K,V> absentEntry() ,public MapQuery<K,V,R> context() ,public void doReplaceValue(Data<V>) ,public MapQuery<K,V,R> entry() ,public Data<K> getInputKeyBytesAsData(BytesStore#RAW, long, long) ,public DataAccess<V> inputValueDataAccess() <variables>public MapAbsent<K,V> absent,public AcquireHandle<K,V> acquireHandle,net.openhft.chronicle.hash.impl.stage.query.SearchAllocatedChunks allocatedChunks,public DefaultReturnValue<V> defaultReturnValue,MapEntryStages<K,V> e,final DataAccess<V> innerInputValueDataAccess,InputKeyBytesData<K> inputKeyBytesData,private boolean inputValueDataAccessInitialized,KeySearch<K> ks,VanillaChronicleMapHolder<K,V,R> mh,public UsingReturnValue<V> usingReturnValue |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/replication/ReplicatedQueryAlloc.java | ReplicatedQueryAlloc | alloc | class ReplicatedQueryAlloc extends QueryAlloc {
final CleanupAction cleanupAction = new CleanupAction();
@StageRef
ReplicatedChronicleMapHolder<?, ?, ?> mh;
@StageRef
SegmentStages s;
/**
* Returns {@code true} if at least one old deleted entry was removed.
*
* @param prevPos position to skip during cleanup (because cleaned up separately)
*/
public boolean forcedOldDeletedEntriesCleanup(long prevPos) {
ReplicatedChronicleMap<?, ?, ?> map = mh.m();
if (!map.cleanupRemovedEntries)
return false;
try (MapSegmentContext<?, ?, ?> sc = map.segmentContext(s.segmentIndex)) {
cleanupAction.removedCompletely = 0;
cleanupAction.posToSkip = prevPos;
cleanupAction.iterationContext = (IterationContext<?, ?, ?>) sc;
((ReplicatedHashSegmentContext<?, ?>) sc)
.forEachSegmentReplicableEntry(cleanupAction);
return cleanupAction.removedCompletely > 0;
}
}
@Override
public long alloc(int chunks, long prevPos, int prevChunks) {<FILL_FUNCTION_BODY>}
private class CleanupAction implements Consumer<ReplicableEntry> {
int removedCompletely;
long posToSkip;
IterationContext<?, ?, ?> iterationContext;
@Override
public void accept(ReplicableEntry e) {
ReplicatedChronicleMap<?, ?, ?> map = mh.m();
if (!(e instanceof MapAbsentEntry) || iterationContext.pos() == posToSkip)
return;
long currentTime = currentTime();
if (e.originTimestamp() > currentTime)
return; // presumably unsynchronized clocks
long deleteTimeout = systemTimeIntervalBetween(
e.originTimestamp(), currentTime, map.cleanupTimeoutUnit);
if (deleteTimeout <= map.cleanupTimeout || e.isChanged())
return;
e.doRemoveCompletely();
removedCompletely++;
}
}
} |
long ret = s.allocReturnCode(chunks);
if (ret >= 0) {
if (prevPos >= 0)
s.free(prevPos, prevChunks);
return ret;
}
int firstAttemptedTier = s.tier;
long firstAttemptedTierIndex = s.tierIndex;
long firstAttemptedTierBaseAddr = s.tierBaseAddr;
boolean cleanedFirstAttemptedTier = forcedOldDeletedEntriesCleanup(prevPos);
if (cleanedFirstAttemptedTier) {
// Force recalculation of some properties as new slots may
// have become available and there might be "holes"
// created by removed entries.
((CompiledReplicatedMapQueryContext) (Object) this).closeSearchKey();
}
s.goToFirstTier();
while (true) {
boolean visitingFirstAttemptedTier = s.tier == firstAttemptedTier;
if (cleanedFirstAttemptedTier || !visitingFirstAttemptedTier) {
ret = s.allocReturnCode(chunks);
if (ret >= 0) {
if (prevPos >= 0) {
if (visitingFirstAttemptedTier) {
s.free(prevPos, prevChunks);
} else if (s.tier < firstAttemptedTier) {
int currentTier = s.tier;
long currentTierIndex = s.tierIndex;
long currentTierBaseAddr = s.tierBaseAddr;
s.initSegmentTier(firstAttemptedTier, firstAttemptedTierIndex,
firstAttemptedTierBaseAddr);
s.free(prevPos, prevChunks);
s.initSegmentTier(currentTier, currentTierIndex, currentTierBaseAddr);
}
}
return ret;
}
}
if (visitingFirstAttemptedTier && prevPos >= 0)
s.free(prevPos, prevChunks);
s.nextTier();
}
| 562 | 522 | 1,084 | <methods>public non-sealed void <init>() ,public long alloc(int, long, int) <variables>public net.openhft.chronicle.hash.impl.stage.entry.SegmentStages s |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/replication/ReplicationUpdate.java | ReplicationUpdate | initReplicationUpdate | class ReplicationUpdate<K> implements RemoteOperationContext<K> {
@Stage("ReplicationUpdate")
public byte innerRemoteIdentifier = (byte) 0;
@Stage("ReplicationUpdate")
public long innerRemoteTimestamp;
@Stage("ReplicationUpdate")
public byte innerRemoteNodeIdentifier;
@StageRef
SegmentStages s;
@StageRef
ReplicatedMapEntryStages<K, ?> e;
@StageRef
ReplicatedChronicleMapHolder<?, ?, ?> mh;
@StageRef
CheckOnEachPublicOperation checkOnEachPublicOperation;
public abstract boolean replicationUpdateInit();
public void initReplicationUpdate(byte identifier, long timestamp, byte remoteNodeIdentifier) {<FILL_FUNCTION_BODY>}
public void dropChange() {
mh.m().dropChange(s.tierIndex, e.pos);
}
public void dropChangeFor(byte remoteIdentifier) {
mh.m().dropChangeFor(s.tierIndex, e.pos, remoteIdentifier);
}
public void moveChange(long oldTierIndex, long oldPos, long newPos) {
mh.m().moveChange(oldTierIndex, oldPos, s.tierIndex, newPos);
}
public void updateChange() {
if (!replicationUpdateInit()) {
raiseChange();
}
}
public void raiseChange() {
mh.m().raiseChange(s.tierIndex, e.pos);
}
public void raiseChangeFor(byte remoteIdentifier) {
mh.m().raiseChangeFor(s.tierIndex, e.pos, remoteIdentifier);
}
public void raiseChangeForAllExcept(byte remoteIdentifier) {
mh.m().raiseChangeForAllExcept(s.tierIndex, e.pos, remoteIdentifier);
}
public boolean changed() {
return mh.m().isChanged(s.tierIndex, e.pos);
}
@Override
public long remoteTimestamp() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return innerRemoteTimestamp;
}
@Override
public byte remoteIdentifier() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return innerRemoteIdentifier;
}
@Override
public byte remoteNodeIdentifier() {
checkOnEachPublicOperation.checkOnEachPublicOperation();
return innerRemoteNodeIdentifier;
}
@Override
public byte currentNodeIdentifier() {
return mh.m().identifier();
}
} |
innerRemoteTimestamp = timestamp;
if (identifier == 0)
throw new IllegalStateException(mh.h().toIdentityString() + ": identifier can't be 0");
innerRemoteIdentifier = identifier;
if (remoteNodeIdentifier == 0) {
throw new IllegalStateException(
mh.h().toIdentityString() + ": remote node identifier can't be 0");
}
innerRemoteNodeIdentifier = remoteNodeIdentifier;
| 651 | 112 | 763 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/ret/DefaultReturnValue.java | DefaultReturnValue | returnValue | class DefaultReturnValue<V> implements InstanceReturnValue<V> {
private V defaultReturnedValue = null;
abstract boolean defaultReturnedValueInit();
private void initDefaultReturnedValue(@NotNull Data<V> value) {
defaultReturnedValue = value.getUsing(null);
}
@Override
public void returnValue(@NotNull Data<V> value) {
initDefaultReturnedValue(value);
}
@Override
public V returnValue() {<FILL_FUNCTION_BODY>}
} |
if (defaultReturnedValueInit()) {
return defaultReturnedValue;
} else {
return null;
}
| 135 | 35 | 170 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/impl/stage/ret/UsingReturnValue.java | UsingReturnValue | returnValue | class UsingReturnValue<V> implements UsableReturnValue<V> {
private V usingReturnValue = (V) USING_RETURN_VALUE_UNINIT;
private V returnedValue = null;
@Override
public void initUsingReturnValue(V usingReturnValue) {
this.usingReturnValue = usingReturnValue;
}
abstract boolean returnedValueInit();
private void initReturnedValue(@NotNull Data<V> value) {
returnedValue = value.getUsing(usingReturnValue);
}
@Override
public void returnValue(@NotNull Data<V> value) {
initReturnedValue(value);
}
@Override
public V returnValue() {<FILL_FUNCTION_BODY>}
} |
if (returnedValueInit()) {
return returnedValue;
} else {
return null;
}
| 188 | 32 | 220 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/map/internal/InternalAssertUtil.java | InternalAssertUtil | assertAddress | class InternalAssertUtil {
private static final boolean IS_64_BIT = Jvm.is64bit();
// Suppresses default constructor, ensuring non-instantiability.
private InternalAssertUtil() {
}
public static boolean assertAddress(final long address) {<FILL_FUNCTION_BODY>}
public static boolean assertPosition(final long position) {
assert position >= 0 : "position is negative: " + position;
return true;
}
} |
if (Jvm.is64bit()) {
// It is highly unlikely that we would ever address farther than 2^63
assert address > 0 : "address is non positive: " + address;
} else {
// These memory addresses are illegal on a 32-bit machine
assert address != 0 && address != -1 : "address is illegal: " + address;
}
return true;
| 123 | 104 | 227 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/set/ChronicleSetBuilderPrivateAPI.java | ChronicleSetBuilderPrivateAPI | remoteOperations | class ChronicleSetBuilderPrivateAPI<K>
implements ChronicleHashBuilderPrivateAPI<K, SetRemoteOperations<K, ?>> {
private final ChronicleHashBuilderPrivateAPI<K, MapRemoteOperations<K, DummyValue, ?>> mapB;
public ChronicleSetBuilderPrivateAPI(
ChronicleHashBuilderPrivateAPI<K, MapRemoteOperations<K, DummyValue, ?>> mapB) {
this.mapB = mapB;
}
@Override
public String name() {
return mapB.name();
}
@Override
public SerializationBuilder<K> keyBuilder() {
return mapB.keyBuilder();
}
@Override
public int segmentEntrySpaceInnerOffset() {
return mapB.segmentEntrySpaceInnerOffset();
}
@Override
public long chunkSize() {
return mapB.chunkSize();
}
@Override
public int maxChunksPerEntry() {
return mapB.maxChunksPerEntry();
}
@Override
public long entriesPerSegment() {
return mapB.entriesPerSegment();
}
@Override
public long tierHashLookupCapacity() {
return mapB.tierHashLookupCapacity();
}
@Override
public long actualChunksPerSegmentTier() {
return mapB.actualChunksPerSegmentTier();
}
@Override
public int segmentHeaderSize() {
return mapB.segmentHeaderSize();
}
@Override
public int actualSegments() {
return mapB.actualSegments();
}
@Override
public long maxExtraTiers() {
return mapB.maxExtraTiers();
}
@Override
public boolean aligned64BitMemoryOperationsAtomic() {
return mapB.aligned64BitMemoryOperationsAtomic();
}
@Override
public boolean checksumEntries() {
return mapB.checksumEntries();
}
@Override
public void replication(byte identifier) {
mapB.replication(identifier);
}
@Override
public void cleanupRemovedEntries(boolean cleanupRemovedEntries) {
mapB.cleanupRemovedEntries(cleanupRemovedEntries);
}
@Override
public void removedEntryCleanupTimeout(long removedEntryCleanupTimeout, TimeUnit unit) {
mapB.removedEntryCleanupTimeout(removedEntryCleanupTimeout, unit);
}
@Override
public void remoteOperations(SetRemoteOperations<K, ?> remoteOperations) {<FILL_FUNCTION_BODY>}
@Override
public Runnable getPreShutdownAction() {
return mapB.getPreShutdownAction();
}
@Override
public boolean skipCloseOnExitHook() {
return mapB.skipCloseOnExitHook();
}
@Override
public boolean sparseFile() {
return mapB.sparseFile();
}
} |
mapB.remoteOperations(new MapRemoteOperations<K, DummyValue, Object>() {
@Override
public void remove(MapRemoteQueryContext<K, DummyValue, Object> q) {
//noinspection unchecked
remoteOperations.remove((SetRemoteQueryContext) q);
}
@Override
public void put(
MapRemoteQueryContext<K, DummyValue, Object> q, Data<DummyValue> newValue) {
//noinspection unchecked
remoteOperations.put((SetRemoteQueryContext) q);
}
});
| 765 | 147 | 912 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/chronicle/set/SetFromMap.java | SetFromMap | toIdentityString | class SetFromMap<E> extends AbstractSet<E> implements ChronicleSet<E> {
private final ChronicleMap<E, DummyValue> m; // The backing map
private transient Set<E> s; // Its keySet
SetFromMap(VanillaChronicleMap<E, DummyValue, ?> map) {
m = map;
map.chronicleSet = this;
s = map.keySet();
}
public void clear() {
throwExceptionIfClosed();
m.clear();
}
public int size() {
throwExceptionIfClosed();
return m.size();
}
public boolean isEmpty() {
throwExceptionIfClosed();
return m.isEmpty();
}
public boolean contains(Object o) {
throwExceptionIfClosed();
return m.containsKey(o);
}
public boolean remove(Object o) {
throwExceptionIfClosed();
return m.remove(o, DUMMY_VALUE);
}
public boolean add(E e) {
throwExceptionIfClosed();
return m.putIfAbsent(e, DUMMY_VALUE) == null;
}
@NotNull
public Iterator<E> iterator() {
throwExceptionIfClosed();
return s.iterator();
}
public Object[] toArray() {
throwExceptionIfClosed();
return s.toArray();
}
public <T> T[] toArray(T[] a) {
return s.toArray(a);
}
public String toString() {
return s.toString();
}
@NotNull
@Override
public String toIdentityString() {<FILL_FUNCTION_BODY>}
public int hashCode() {
throwExceptionIfClosed();
return s.hashCode();
}
public boolean equals(Object o) {
throwExceptionIfClosed();
return o == this || s.equals(o);
}
public boolean containsAll(@NotNull Collection<?> c) {
throwExceptionIfClosed();
return s.containsAll(c);
}
public boolean removeAll(@NotNull Collection<?> c) {
throwExceptionIfClosed();
return s.removeAll(c);
}
public boolean retainAll(@NotNull Collection<?> c) {
throwExceptionIfClosed();
return s.retainAll(c);
}
// addAll is the only inherited implementation
@Override
public long longSize() {
throwExceptionIfClosed();
return m.longSize();
}
@Override
public long offHeapMemoryUsed() {
throwExceptionIfClosed();
return m.offHeapMemoryUsed();
}
@Override
public Class<E> keyClass() {
throwExceptionIfClosed();
return m.keyClass();
}
@Override
public Type keyType() {
throwExceptionIfClosed();
return m.keyType();
}
// TODO test queryContext methods
@NotNull
@Override
public ExternalSetQueryContext<E, ?> queryContext(E key) {
//noinspection unchecked
return (ExternalSetQueryContext<E, ?>) m.queryContext(key);
}
@NotNull
@Override
public ExternalSetQueryContext<E, ?> queryContext(Data<E> key) {
//noinspection unchecked
return (ExternalSetQueryContext<E, ?>) m.queryContext(key);
}
@NotNull
@Override
public ExternalSetQueryContext<E, ?> queryContext(BytesStore keyBytes, long offset, long size) {
//noinspection unchecked
return (ExternalSetQueryContext<E, ?>) m.queryContext(keyBytes, offset, size);
}
@Override
public SetSegmentContext<E, ?> segmentContext(int segmentIndex) {
// TODO
throw new UnsupportedOperationException();
}
@Override
public int segments() {
throwExceptionIfClosed();
return m.segments();
}
// TODO test forEach methods
@Override
public boolean forEachEntryWhile(Predicate<? super SetEntry<E>> predicate) {
throwExceptionIfClosed();
Objects.requireNonNull(predicate);
return m.forEachEntryWhile(e -> predicate.test(((SetEntry<E>) e)));
}
@Override
public void forEachEntry(Consumer<? super SetEntry<E>> action) {
throwExceptionIfClosed();
Objects.requireNonNull(action);
m.forEachEntry(e -> action.accept(((SetEntry<E>) e)));
}
@Override
public File file() {
throwExceptionIfClosed();
return m.file();
}
@Override
public String name() {
throwExceptionIfClosed();
return m.name();
}
@Override
public void close() {
m.close();
}
@Override
public boolean isOpen() {
throwExceptionIfClosed();
return m.isOpen();
}
} |
throwExceptionIfClosed();
return "ChronicleSet{" +
"name=" + name() +
", file=" + file() +
", identityHashCode=" + System.identityHashCode(this) +
"}";
| 1,340 | 65 | 1,405 | <methods>public boolean equals(java.lang.Object) ,public int hashCode() ,public boolean removeAll(Collection<?>) <variables> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/xstream/converters/AbstractChronicleMapConverter.java | AbstractChronicleMapConverter | unmarshal | class AbstractChronicleMapConverter<K, V> implements Converter {
private final Map<K, V> map;
private final Class mapClazz;
AbstractChronicleMapConverter(@NotNull Map<K, V> map) {
this.map = map;
this.mapClazz = map.getClass();
}
private static <E> E deserialize(@NotNull UnmarshallingContext unmarshallingContext,
@NotNull HierarchicalStreamReader reader) {
switch (reader.getNodeName()) {
case "java.util.Collections$EmptySet":
return (E) Collections.emptySet();
case "java.util.Collections$EmptyList":
return (E) Collections.emptyList();
case "java.util.Collections$EmptyMap":
case "java.util.Collections.EmptyMap":
return (E) Collections.emptyMap();
}
return (E) unmarshallingContext.convertAnother(null, forName(reader.getNodeName()));
}
private static Class forName(String clazz) {
try {
return Class.forName(clazz);
} catch (ClassNotFoundException e) {
boolean isNative = clazz.endsWith($$NATIVE);
boolean isHeap = clazz.endsWith($$HEAP);
if (!isNative && !isHeap)
throw new ConversionException("class=" + clazz, e);
final String nativeInterface = isNative ?
clazz.substring(0, clazz.length() - $$NATIVE.length()) :
clazz.substring(0, clazz.length() - $$HEAP.length());
try {
Values.newNativeReference(Class.forName(clazz));
return Class.forName(nativeInterface);
} catch (Exception e1) {
throw new ConversionException("class=" + clazz, e1);
}
}
}
@Override
public boolean canConvert(Class aClass) {
//noinspection unchecked
return mapClazz.isAssignableFrom(aClass);
}
@Override
public void marshal(Object o, HierarchicalStreamWriter writer, MarshallingContext
marshallingContext) {
for (Map.Entry e : (Iterable<Map.Entry>) ((Map) o).entrySet()) {
writer.startNode("entry");
{
final Object key = e.getKey();
writer.startNode(key.getClass().getName());
marshallingContext.convertAnother(key);
writer.endNode();
Object value = e.getValue();
writer.startNode(value.getClass().getName());
marshallingContext.convertAnother(value);
writer.endNode();
}
writer.endNode();
}
}
@Override
public Object unmarshal(HierarchicalStreamReader reader,
UnmarshallingContext context) {<FILL_FUNCTION_BODY>}
} |
// empty map
if ("[\"\"]".equals(reader.getValue()))
return null;
if (!"cmap".equals(reader.getNodeName()))
throw new ConversionException("should be under 'cmap' node");
reader.moveDown();
while (reader.hasMoreChildren()) {
reader.moveDown();
final String nodeName0 = reader.getNodeName();
if (!nodeName0.equals("entry"))
throw new ConversionException("unable to convert node named=" + nodeName0);
final K k;
final V v;
reader.moveDown();
k = deserialize(context, reader);
reader.moveUp();
reader.moveDown();
v = deserialize(context, reader);
reader.moveUp();
if (k != null)
map.put(k, v);
reader.moveUp();
}
reader.moveUp();
return null;
| 754 | 245 | 999 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/xstream/converters/ByteBufferConverter.java | ByteBufferConverter | unmarshal | class ByteBufferConverter implements Converter {
private final Charset charset = Charset.forName("ISO-8859-1");
private final CharsetDecoder decoder = charset.newDecoder();
@Override
public void marshal(Object o, HierarchicalStreamWriter writer, MarshallingContext marshallingContext) {
ByteBuffer buffer = (ByteBuffer) o;
writer.startNode("position");
marshallingContext.convertAnother(buffer.position());
writer.endNode();
writer.startNode("capacity");
marshallingContext.convertAnother(buffer.capacity());
writer.endNode();
writer.startNode("limit");
marshallingContext.convertAnother(buffer.limit());
writer.endNode();
writer.startNode("isDirect");
marshallingContext.convertAnother(buffer.isDirect());
writer.endNode();
buffer.limit();
buffer.capacity();
int position = buffer.position();
int limit = buffer.limit();
buffer.clear();
writer.startNode("data");
try {
CharBuffer charBuffer = decoder.decode(buffer);
writer.setValue(charBuffer.toString());
} catch (CharacterCodingException e) {
throw new ConversionException("", e);
}
writer.endNode();
buffer.limit(limit);
buffer.position(position);
}
@Override
public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext unmarshallingContext) {<FILL_FUNCTION_BODY>}
@Override
public boolean canConvert(Class aClass) {
return ByteBuffer.class.isAssignableFrom(aClass);
}
} |
reader.moveDown();
int position = (Integer) unmarshallingContext.convertAnother(null, int.class);
reader.moveUp();
reader.moveDown();
int capacity = (Integer) unmarshallingContext.convertAnother(null, int.class);
reader.moveUp();
reader.moveDown();
int limit = (Integer) unmarshallingContext.convertAnother(null, int.class);
reader.moveUp();
reader.moveDown();
boolean isDirect = (Boolean) unmarshallingContext.convertAnother(null, boolean.class);
reader.moveUp();
ByteBuffer buffer = isDirect ? ByteBuffer.allocateDirect(capacity) : ByteBuffer.allocate(capacity);
buffer.clear();
reader.moveDown();
String o = (String) unmarshallingContext.convertAnother(null, String.class);
CharBuffer uCharBuffer = CharBuffer.wrap(o);
CharsetEncoder encoder = charset.newEncoder();
CoderResult encode = encoder.encode(uCharBuffer, buffer, true);
if (encode.isError())
throw new ConversionException("");
buffer.limit(limit);
buffer.position(position);
reader.moveUp();
buffer.limit(limit);
buffer.position(position);
return buffer;
| 439 | 338 | 777 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/xstream/converters/CharSequenceConverter.java | CharSequenceConverter | unmarshal | class CharSequenceConverter implements Converter {
@Override
public void marshal(
Object source, HierarchicalStreamWriter writer, MarshallingContext context) {
writer.setValue(source.toString());
}
@Override
public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext context) {<FILL_FUNCTION_BODY>}
@Override
public boolean canConvert(Class type) {
return CharSequence.class.isAssignableFrom(type);
}
} |
if (context.getRequiredType() == StringBuilder.class) {
return new StringBuilder(reader.getValue());
} else {
return reader.getValue();
}
| 130 | 46 | 176 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/xstream/converters/ValueConverter.java | ValueConverter | unmarshal | class ValueConverter implements Converter {
@Override
public void marshal(Object o, HierarchicalStreamWriter writer, MarshallingContext context) {
ValueModel valueModel = ValueModel.acquire(o.getClass());
valueModel.fields().forEach(fieldModel -> {
if (fieldModel instanceof ArrayFieldModel) {
try {
final Method indexedGet = fieldModel.getOrGetVolatile();
indexedGet.setAccessible(true);
writer.startNode(fieldModel.name());
for (int i = 0; i < ((ArrayFieldModel) fieldModel).array().length(); i++) {
writer.startNode(Integer.toString(i));
context.convertAnother(indexedGet.invoke(o, i));
writer.endNode();
}
writer.endNode();
} catch (IllegalAccessException | InvocationTargetException e) {
throw new ConversionException("", e);
}
return;
}
try {
final Method get = fieldModel.getOrGetVolatile();
get.setAccessible(true);
final Object value = get.invoke(o);
writer.startNode(fieldModel.name());
context.convertAnother(value);
writer.endNode();
} catch (Exception e) {
Jvm.error().on(getClass(), "class=" + fieldModel.name(), e);
}
});
}
@Override
public Object unmarshal(HierarchicalStreamReader reader, UnmarshallingContext context) {<FILL_FUNCTION_BODY>}
private void fillInObject(HierarchicalStreamReader reader, UnmarshallingContext context,
ValueModel valueModel, Object using) throws ClassNotFoundException {
while (reader.hasMoreChildren()) {
reader.moveDown();
final String name = reader.getNodeName();
FieldModel fieldModel =
valueModel.fields().filter(f -> f.name().equals(name)).findAny().orElseThrow(() -> new NoSuchElementException("No element with name " + name));
if (fieldModel instanceof ArrayFieldModel) {
while (reader.hasMoreChildren()) {
reader.moveDown();
try {
String index = reader.getNodeName();
int i = Integer.parseInt(index);
Method indexedSet = fieldModel.setOrSetOrderedOrSetVolatile();
indexedSet.setAccessible(true);
Class<?>[] parameterTypes = indexedSet.getParameterTypes();
Object value = context.convertAnother(null, parameterTypes[1]);
indexedSet.invoke(using, i, value);
} catch (Exception e) {
throw new ConversionException("", e);
}
reader.moveUp();
}
reader.moveUp();
continue;
}
Method set = fieldModel.setOrSetOrderedOrSetVolatile();
set.setAccessible(true);
final Class<?>[] parameterTypes = set.getParameterTypes();
final Object value = context.convertAnother(null, parameterTypes[0]);
try {
set.invoke(using, value);
} catch (Exception e) {
throw new ConversionException("", e);
}
reader.moveUp();
}
}
@Override
public boolean canConvert(Class clazz) {
return Values.isValueInterfaceOrImplClass(clazz);
}
} |
try {
ValueModel valueModel = ValueModel.acquire(context.getRequiredType());
Object result = valueModel.heapClass().newInstance();
fillInObject(reader, context, valueModel, result);
return result;
} catch (Exception e) {
throw new ConversionException(
"class=" + context.getRequiredType().getCanonicalName(), e);
}
| 865 | 101 | 966 | <no_super_class> |
OpenHFT_Chronicle-Map | Chronicle-Map/src/main/java/net/openhft/xstream/converters/VanillaChronicleMapConverter.java | VanillaChronicleMapConverter | marshal | class VanillaChronicleMapConverter<K, V> extends AbstractChronicleMapConverter<K, V> {
public VanillaChronicleMapConverter(@NotNull Map<K, V> map) {
super(map);
}
@Override
public void marshal(Object o, final HierarchicalStreamWriter writer, final MarshallingContext
marshallingContext) {<FILL_FUNCTION_BODY>}
} |
((ChronicleMap<K, V>) o).forEachEntry(e -> {
writer.startNode("entry");
{
final Object key = e.key().get();
writer.startNode(key.getClass().getName());
marshallingContext.convertAnother(key);
writer.endNode();
Object value = e.value().get();
writer.startNode(value.getClass().getName());
marshallingContext.convertAnother(value);
writer.endNode();
}
writer.endNode();
});
| 107 | 140 | 247 | <methods>public boolean canConvert(Class#RAW) ,public void marshal(java.lang.Object, com.thoughtworks.xstream.io.HierarchicalStreamWriter, com.thoughtworks.xstream.converters.MarshallingContext) ,public java.lang.Object unmarshal(com.thoughtworks.xstream.io.HierarchicalStreamReader, com.thoughtworks.xstream.converters.UnmarshallingContext) <variables>private final non-sealed Map<K,V> map,private final non-sealed Class#RAW mapClazz |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/ChronicleHistoryReaderMain.java | ChronicleHistoryReaderMain | options | class ChronicleHistoryReaderMain {
public static void main(@NotNull String[] args) {
new ChronicleHistoryReaderMain().run(args);
}
protected void run(String[] args) {
final Options options = options();
final CommandLine commandLine = parseCommandLine(args, options);
try (final ChronicleHistoryReader chronicleHistoryReader = chronicleHistoryReader()) {
setup(commandLine, chronicleHistoryReader);
chronicleHistoryReader.execute();
}
}
protected void setup(@NotNull final CommandLine commandLine, @NotNull final ChronicleHistoryReader chronicleHistoryReader) {
chronicleHistoryReader.
withMessageSink(System.out::println).
withProgress(commandLine.hasOption('p')).
withHistosByMethod(commandLine.hasOption('m')).
withBasePath(Paths.get(commandLine.getOptionValue('d')));
if (commandLine.hasOption('t'))
chronicleHistoryReader.withTimeUnit(TimeUnit.valueOf(commandLine.getOptionValue('t')));
if (commandLine.hasOption('i'))
chronicleHistoryReader.withIgnore(Long.parseLong(commandLine.getOptionValue('i')));
if (commandLine.hasOption('w'))
chronicleHistoryReader.withMeasurementWindow(Long.parseLong(commandLine.getOptionValue('w')));
if (commandLine.hasOption('u'))
chronicleHistoryReader.withSummaryOutput(Integer.parseInt(commandLine.getOptionValue('u')));
}
@NotNull
protected ChronicleHistoryReader chronicleHistoryReader() {
return new ChronicleHistoryReader();
}
protected CommandLine parseCommandLine(@NotNull final String[] args, final Options options) {
final CommandLineParser parser = new DefaultParser();
CommandLine commandLine = null;
try {
commandLine = parser.parse(options, args);
if (commandLine.hasOption('h')) {
printHelpAndExit(options, 0);
}
} catch (ParseException e) {
printHelpAndExit(options, 1, e.getMessage());
}
return commandLine;
}
protected void printHelpAndExit(final Options options, int status) {
printHelpAndExit(options, status, null);
}
protected void printHelpAndExit(final Options options, int status, String message) {
final PrintWriter writer = new PrintWriter(System.out);
new HelpFormatter().printHelp(
writer,
180,
this.getClass().getSimpleName(),
message,
options,
HelpFormatter.DEFAULT_LEFT_PAD,
HelpFormatter.DEFAULT_DESC_PAD,
null,
true
);
writer.flush();
System.exit(status);
}
@NotNull
protected Options options() {<FILL_FUNCTION_BODY>}
} |
final Options options = new Options();
ChronicleReaderMain.addOption(options, "d", "directory", true, "Directory containing chronicle queue files", true);
ChronicleReaderMain.addOption(options, "h", "help-message", false, "Print this help and exit", false);
ChronicleReaderMain.addOption(options, "t", "time unit", true, "Time unit. Default nanos", false);
ChronicleReaderMain.addOption(options, "i", "ignore", true, "How many items to ignore from start", false);
ChronicleReaderMain.addOption(options, "w", "window", true, "Window duration in time unit. Instead of one output at the end, will output every window period", false);
ChronicleReaderMain.addOption(options, "u", "histo offset", true, "Summary output. Instead of histograms, will show one value only, in CSV format. Set this to 0 for 50th, 1 for 90th etc., -1 for worst", false);
options.addOption(new Option("p", false, "Show progress"));
options.addOption(new Option("m", false, "By method"));
return options;
| 724 | 289 | 1,013 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/channel/PipeHandler.java | PipeHandler | run | class PipeHandler extends AbstractHandler<PipeHandler> {
private String publish;
private String subscribe;
private SyncMode syncMode;
private transient Thread tailerThread;
private Predicate<Wire> filter = null;
private int publishSourceId = 0;
private int subscribeSourceId = 0;
private Consumer<ExcerptTailer> subscriptionIndexController = SubscribeHandler.NO_OP;
public PipeHandler() {
}
static ChronicleQueue newQueue(ChronicleContext context, String queueName, SyncMode syncMode, int sourceId) {
final File path = context.toFile(queueName);
return ChronicleQueue.singleBuilder(path).blockSize(OS.isSparseFileSupported() ? 512L << 30 : 64L << 20).sourceId(sourceId).syncMode(syncMode).build();
}
public String publish() {
return publish;
}
public PipeHandler publish(String publish) {
this.publish = publish;
return this;
}
public String subscribe() {
return subscribe;
}
public PipeHandler subscribe(String subscribe) {
this.subscribe = subscribe;
return this;
}
public SyncMode syncMode() {
return syncMode;
}
public PipeHandler syncMode(SyncMode syncMode) {
this.syncMode = syncMode;
return this;
}
public Predicate<Wire> filter() {
return filter;
}
public PipeHandler filter(Predicate<Wire> filter) {
this.filter = filter;
return this;
}
public int publishSourceId() {
return publishSourceId;
}
public PipeHandler publishSourceId(int publishSourceId) {
this.publishSourceId = publishSourceId;
return this;
}
public PipeHandler subscribeSourceId(int subscribeSourceId) {
this.subscribeSourceId = subscribeSourceId;
return this;
}
@Override
public void run(ChronicleContext context, ChronicleChannel channel) {<FILL_FUNCTION_BODY>}
@Override
public ChronicleChannel asInternalChannel(ChronicleContext context, ChronicleChannelCfg channelCfg) {
return new QueuesChannel(channelCfg, this, newQueue(context, publish, syncMode, publishSourceId), newQueue(context, subscribe, syncMode, 0));
}
static class PHEventPoller extends SimpleCloseable implements EventPoller {
private final ExcerptTailer tailer;
private final Predicate<Wire> filter;
public PHEventPoller(ExcerptTailer tailer, Predicate<Wire> filter) {
this.tailer = tailer;
this.filter = filter;
}
@Override
public boolean onPoll(ChronicleChannel conn) {
boolean wrote = false;
while (SubscribeHandler.copyOneMessage(conn, tailer, filter)) wrote = true;
return wrote;
}
@Override
protected void performClose() {
Closeable.closeQuietly(tailer, tailer.queue());
super.performClose();
}
}
/**
* @param subscriptionIndexController controls where the subscriptions will start to read from, by allowing the caller to
* {@link net.openhft.chronicle.queue.ExcerptTailer#moveToIndex(long) to control the first read location
*/
public PipeHandler subscriptionIndexController(Consumer<ExcerptTailer> subscriptionIndexController) {
this.subscriptionIndexController = subscriptionIndexController;
return this;
}
} |
Pauser pauser = Pauser.balanced();
try (ChronicleQueue subscribeQ = newQueue(context, subscribe, syncMode, subscribeSourceId)) {
final ExcerptTailer tailer;
if (channel instanceof BufferedChronicleChannel) {
BufferedChronicleChannel bc = (BufferedChronicleChannel) channel;
tailer = subscribeQ.createTailer();
tailer.singleThreadedCheckDisabled(true); // assume we are thread safe
subscriptionIndexController.accept(tailer);
bc.eventPoller(new PHEventPoller(tailer, filter));
} else {
tailerThread = new Thread(() -> {
try (AffinityLock lock = context.affinityLock()) {
SubscribeHandler.queueTailer(pauser, channel, subscribeQ, filter, subscriptionIndexController);
} catch (ClosedIORuntimeException e) {
Jvm.warn().on(PipeHandler.class, e.toString());
} catch (Throwable t) {
Jvm.warn().on(PipeHandler.class, t);
}
}, "pipe~tailer");
tailerThread.setDaemon(true);
tailerThread.start();
}
Thread.currentThread().setName("pipe~reader");
try (AffinityLock lock = context.affinityLock()) {
copyFromChannelToQueue(channel, pauser, newQueue(context, publish, syncMode, publishSourceId), syncMode);
} finally {
if (tailerThread != null) tailerThread.interrupt();
}
}
| 938 | 407 | 1,345 | <methods>public void <init>() ,public java.lang.Boolean buffered() ,public net.openhft.chronicle.queue.channel.PipeHandler buffered(java.lang.Boolean) <variables>private java.lang.Boolean buffered |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/channel/PublishHandler.java | PublishHandler | copyFromChannelToQueue | class PublishHandler extends AbstractHandler<PublishHandler> {
private String publish;
private SyncMode syncMode;
private int publishSourceId = 0;
static void copyFromChannelToQueue(ChronicleChannel channel, Pauser pauser, ChronicleQueue publishQueue, SyncMode syncMode) {<FILL_FUNCTION_BODY>}
private static void syncAppender(ExcerptAppender appender, SyncMode syncMode) {
if (syncMode == SyncMode.SYNC) {
try (DocumentContext dc2 = appender.writingDocument()) {
dc2.wire().write("sync").text("");
}
}
appender.sync();
}
public String publish() {
return publish;
}
public PublishHandler publish(String publish) {
this.publish = publish;
return this;
}
public SyncMode syncMode() {
return syncMode;
}
public PublishHandler syncMode(SyncMode syncMode) {
this.syncMode = syncMode;
return this;
}
public int publishSourceId() {
return publishSourceId;
}
public PublishHandler publishSourceId(int publishSourceId) {
this.publishSourceId = publishSourceId;
return this;
}
@Override
public void run(ChronicleContext context, ChronicleChannel channel) {
Pauser pauser = Pauser.balanced();
Thread.currentThread().setName("publish~reader");
try (AffinityLock lock = context.affinityLock()) {
copyFromChannelToQueue(channel, pauser, newQueue(context, publish, syncMode, publishSourceId), syncMode);
}
}
@Override
public ChronicleChannel asInternalChannel(ChronicleContext context, ChronicleChannelCfg channelCfg) {
return new PublishQueueChannel(channelCfg, this, newQueue(context, publish, syncMode, publishSourceId));
}
} |
try (ChronicleQueue publishQ = publishQueue;
ExcerptAppender appender = publishQ.createAppender()) {
appender.singleThreadedCheckDisabled(true); // assume we are thread safe
boolean needsSync = false;
while (!channel.isClosed()) {
try (DocumentContext dc = channel.readingDocument()) {
pauser.unpause();
if (!dc.isPresent()) {
if (needsSync) {
syncAppender(appender, syncMode);
needsSync = false;
}
continue;
}
if (dc.isMetaData()) {
// read message
continue;
}
try (DocumentContext dc2 = appender.writingDocument()) {
dc.wire().copyTo(dc2.wire());
needsSync = syncMode == SyncMode.SYNC || syncMode == SyncMode.ASYNC;
}
}
}
} finally {
Thread.currentThread().setName("connections");
}
| 508 | 263 | 771 | <methods>public void <init>() ,public java.lang.Boolean buffered() ,public net.openhft.chronicle.queue.channel.PublishHandler buffered(java.lang.Boolean) <variables>private java.lang.Boolean buffered |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/channel/SubscribeHandler.java | NoOp | run | class NoOp extends SelfDescribingMarshallable implements Consumer {
@Override
public void accept(Object o) {
return;
}
}
public final static Consumer NO_OP = new NoOp();
private String subscribe;
private transient boolean closeWhenRunEnds = true;
private SyncMode syncMode;
private Predicate<Wire> filter;
private int sourceId;
private Consumer<ExcerptTailer> subscriptionIndexController = NO_OP;
static void queueTailer(@NotNull Pauser pauser,
@NotNull ChronicleChannel channel,
@NotNull ChronicleQueue subscribeQueue,
@Nullable Predicate<Wire> filter,
@NotNull Consumer<ExcerptTailer> subscriptionIndexController) {
try (ChronicleQueue subscribeQ = subscribeQueue; // leave here so it gets closed
ExcerptTailer tailer = subscribeQ.createTailer()) {
tailer.singleThreadedCheckDisabled(true); // assume we are thread safe
subscriptionIndexController.accept(tailer);
while (!channel.isClosing()) {
if (copyOneMessage(channel, tailer, filter))
pauser.reset();
else
pauser.pause();
}
} catch (Exception e) {
Thread.yield();
if (channel.isClosing() || subscribeQueue.isClosing())
return;
throw e;
}
}
static boolean copyOneMessage(ChronicleChannel channel, ExcerptTailer tailer, Predicate<Wire> filter) {
try (DocumentContext dc = tailer.readingDocument()) {
if (!dc.isPresent()) {
return false;
}
if (dc.isMetaData()) {
return true;
}
Wire wire1 = dc.wire();
if (filter != null) {
long pos = wire1.bytes().readPosition();
if (!filter.test(wire1)) {
wire1.bytes().readPosition(wire1.bytes().readLimit());
return true;
}
wire1.bytes().readPosition(pos);
}
try (DocumentContext dc2 = channel.writingDocument()) {
Wire wire2 = dc2.wire();
wire1.copyTo(wire2);
final long dataBuffered = wire2.bytes().writePosition();
// wait for it to drain
return dataBuffered < 32 << 10;
}
}
}
public String subscribe() {
return subscribe;
}
public SubscribeHandler subscribe(String subscribe) {
this.subscribe = subscribe;
return this;
}
public SyncMode syncMode() {
return syncMode;
}
public SubscribeHandler syncMode(SyncMode syncMode) {
this.syncMode = syncMode;
return this;
}
public Predicate<Wire> filter() {
return filter;
}
public SubscribeHandler filter(Predicate<Wire> filter) {
this.filter = filter;
return this;
}
@Override
public void run(ChronicleContext context, ChronicleChannel channel) {<FILL_FUNCTION_BODY> |
Pauser pauser = Pauser.balanced();
final ExcerptTailer tailer;
try (ChronicleQueue subscribeQ = newQueue(context, subscribe, syncMode, sourceId)) {
InternalChronicleChannel icc = (InternalChronicleChannel) channel;
if (icc.supportsEventPoller()) {
tailer = subscribeQ.createTailer();
icc.eventPoller(new SHEventHandler(tailer, filter));
closeWhenRunEnds = false;
} else {
try (AffinityLock lock = context.affinityLock()) {
queueTailer(pauser, channel, newQueue(context, subscribe, syncMode, sourceId), filter, subscriptionIndexController);
}
closeWhenRunEnds = true;
}
}
| 819 | 207 | 1,026 | <methods>public void <init>() ,public java.lang.Boolean buffered() ,public net.openhft.chronicle.queue.channel.SubscribeHandler buffered(java.lang.Boolean) <variables>private java.lang.Boolean buffered |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/channel/impl/PublishQueueChannel.java | PublishQueueChannel | testMessage | class PublishQueueChannel implements ChronicleChannel {
private final ChronicleChannelCfg channelCfg;
private final AbstractHandler publishHandler;
private final ChannelHeader headerOut;
private final ChronicleQueue publishQueue;
private final ExcerptTailer tailer;
public PublishQueueChannel(ChronicleChannelCfg channelCfg, AbstractHandler publishHandler, ChronicleQueue publishQueue) {
this.channelCfg = channelCfg;
this.publishHandler = publishHandler;
this.headerOut = publishHandler.responseHeader(null);
this.publishQueue = publishQueue;
tailer = publishQueue.createTailer();
}
@Override
public ChronicleChannelCfg channelCfg() {
return channelCfg;
}
@Override
public ChannelHeader headerOut() {
return headerOut;
}
@Override
public ChannelHeader headerIn() {
return publishHandler;
}
@Override
public void close() {
Closeable.closeQuietly(
tailer,
publishQueue);
}
@Override
public boolean isClosed() {
return publishQueue.isClosed();
}
@Override
public DocumentContext readingDocument() {
return NoDocumentContext.INSTANCE;
}
@Override
public DocumentContext writingDocument(boolean metaData) throws UnrecoverableTimeoutException {
return acquireThreadLocalAppender(publishQueue).writingDocument(metaData);
}
@Override
public DocumentContext acquireWritingDocument(boolean metaData) throws UnrecoverableTimeoutException {
return acquireThreadLocalAppender(publishQueue).acquireWritingDocument(metaData);
}
@Override
public void testMessage(long now) {<FILL_FUNCTION_BODY>}
@Override
public long lastTestMessage() {
throw new UnsupportedOperationException();
}
} |
try (DocumentContext dc = writingDocument(true)) {
dc.wire().write("testMessage").writeLong(NanoTime.INSTANCE, now);
}
| 479 | 45 | 524 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/channel/impl/SubscribeQueueChannel.java | SubscribeQueueChannel | readingDocument | class SubscribeQueueChannel implements ChronicleChannel {
private final ChronicleChannelCfg channelCfg;
private final AbstractHandler pipeHandler;
private final ChannelHeader headerOut;
private final ChronicleQueue subscribeQueue;
private final ExcerptTailer tailer;
private long lastTestMessage;
public SubscribeQueueChannel(ChronicleChannelCfg channelCfg, AbstractHandler pipeHandler, ChronicleQueue subscribeQueue) {
this.channelCfg = channelCfg;
this.pipeHandler = pipeHandler;
this.headerOut = pipeHandler.responseHeader(null);
this.subscribeQueue = subscribeQueue;
tailer = subscribeQueue.createTailer();
}
@Override
public ChronicleChannelCfg channelCfg() {
return channelCfg;
}
@Override
public ChannelHeader headerOut() {
return headerOut;
}
@Override
public ChannelHeader headerIn() {
return pipeHandler;
}
@Override
public void close() {
Closeable.closeQuietly(
tailer,
subscribeQueue);
}
@Override
public boolean isClosed() {
return subscribeQueue.isClosed();
}
@Override
public DocumentContext readingDocument() {<FILL_FUNCTION_BODY>}
@Override
public DocumentContext writingDocument(boolean metaData) throws UnrecoverableTimeoutException {
return NoDocumentContext.INSTANCE;
}
@Override
public DocumentContext acquireWritingDocument(boolean metaData) throws UnrecoverableTimeoutException {
return NoDocumentContext.INSTANCE;
}
@Override
public void testMessage(long now) {
}
@Override
public long lastTestMessage() {
return lastTestMessage;
}
} |
final DocumentContext dc = tailer.readingDocument(true);
if (dc.isMetaData()) {
final Wire wire = dc.wire();
long pos = wire.bytes().readPosition();
final String event = wire.readEvent(String.class);
if ("testMessage".equals(event)) {
final long testMessage = wire.getValueIn().readLong(NanoTime.INSTANCE);
lastTestMessage = testMessage;
try (DocumentContext dc2 = writingDocument(true)) {
dc2.wire().write("testMessage").writeLong(NanoTime.INSTANCE, testMessage);
}
wire.bytes().readPosition(pos);
return dc;
}
dc.close();
return readingDocument();
}
return dc;
| 452 | 200 | 652 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/RollingResourcesCache.java | RollingResourcesCache | parseCount0 | class RollingResourcesCache {
public static final ParseCount NO_PARSE_COUNT = new ParseCount("", Integer.MIN_VALUE);
private static final int CACHE_SIZE = Jvm.getInteger("chronicle.queue.rollingResourceCache.size", 128);
private static final int ONE_DAY_IN_MILLIS = 86400000;
private static final int MAX_TIMESTAMP_CACHE_SIZE = 32;
@NotNull
private final Function<String, File> fileFactory;
@NotNull
private final DateTimeFormatter formatter;
@NotNull
private final Resource[] values;
private final int length;
@NotNull
private final Function<File, String> fileToName;
private final String format;
private final ConcurrentMap<File, Long> filenameToTimestampCache =
new ConcurrentHashMap<>(MAX_TIMESTAMP_CACHE_SIZE);
private final long epoch;
private ParseCount lastParseCount = NO_PARSE_COUNT;
public RollingResourcesCache(@NotNull final RollCycle cycle, long epoch,
@NotNull Function<String, File> nameToFile,
@NotNull Function<File, String> fileToName) {
this(cycle.lengthInMillis(), cycle.format(), epoch, nameToFile, fileToName);
}
private RollingResourcesCache(final int length,
@NotNull String format, long epoch,
@NotNull Function<String, File> nameToFile,
@NotNull Function<File, String> fileToName) {
this.length = length;
this.fileToName = fileToName;
this.values = new Resource[CACHE_SIZE];
final long millisInDay = epoch % ONE_DAY_IN_MILLIS;
this.epoch = millisInDay >= 0 ? epoch - millisInDay : -ONE_DAY_IN_MILLIS;
this.format = format;
this.formatter = DateTimeFormatter.ofPattern(this.format).withZone(ZoneId.of("UTC"));
this.fileFactory = nameToFile;
}
/**
* Cache some resources for a rollCycle number.
*
* @param cycle the rollCycle number to format
* @return the Resource
*/
@NotNull
public Resource resourceFor(long cycle) {
long millisSinceBeginningOfEpoch = (cycle * length);
long h = Maths.hash64(millisSinceBeginningOfEpoch);
h ^= h >> 32;
int hash = (int) h & (CACHE_SIZE - 1);
Resource dv = values[hash];
if (dv == null || dv.millis != millisSinceBeginningOfEpoch) {
final Instant instant = Instant.ofEpochMilli(millisSinceBeginningOfEpoch + epoch);
@NotNull String text = formatter.format(instant);
values[hash] = dv = new Resource(millisSinceBeginningOfEpoch, text, fileFactory.apply(text));
}
return dv;
}
public int parseCount(@NotNull String name) {
ParseCount last = this.lastParseCount;
if (name.equals(last.name))
return last.count;
int count = parseCount0(name);
lastParseCount = new ParseCount(name, count);
return count;
}
private int parseCount0(@NotNull String name) {<FILL_FUNCTION_BODY>}
/**
 * Converts a roll file to its cycle number, caching the result per {@link File}.
 * <p>The cycle is derived from the timestamp parsed out of the file name:
 * for daily rolls it is the epoch day, for sub-daily rolls the epoch millis
 * divided by the cycle length, and for multi-day rolls a scaled epoch-day value
 * (with an adjustment for pre-epoch dates).
 *
 * @param file the roll file whose name encodes the cycle timestamp
 * @return the cycle number for {@code file}
 */
public Long toLong(File file) {
    final Long cachedValue = filenameToTimestampCache.get(file);
    if (cachedValue != null) {
        return cachedValue;
    }
    final TemporalAccessor parse = formatter.parse(fileToName.apply(file));
    final long value;
    if (length == ONE_DAY_IN_MILLIS) {
        // Daily cycle: the cycle number is simply the epoch day.
        value = parse.getLong(ChronoField.EPOCH_DAY);
    } else if (length < ONE_DAY_IN_MILLIS) {
        // Sub-daily cycle: number of whole cycle lengths since the epoch.
        value = Instant.from(parse).toEpochMilli() / length;
    } else {
        // Multi-day cycle: scale epoch days by the cycle length (in seconds);
        // adjShift rounds toward negative infinity for pre-epoch dates.
        long daysSinceEpoch = parse.getLong(ChronoField.EPOCH_DAY);
        long adjShift = daysSinceEpoch < 0 ? -1 : 0;
        value = adjShift + ((daysSinceEpoch * 86400) / (length / 1000));
    }
    // Crude eviction: wipe the whole cache once it reaches capacity.
    if (filenameToTimestampCache.size() >= MAX_TIMESTAMP_CACHE_SIZE) {
        filenameToTimestampCache.clear();
    }
    filenameToTimestampCache.put(file, value);
    return value;
}
static final class ParseCount {
final String name;
final int count;
public ParseCount(String name, int count) {
this.name = name;
this.count = count;
}
}
/**
 * Value object describing one roll-cycle resource: its millisecond offset from
 * the epoch, the formatted cycle name, and the file (plus parent directory) it
 * maps to.
 */
public static final class Resource {
    // Milliseconds since the (normalised) epoch for this cycle.
    public final long millis;
    // The formatted cycle name produced by the roll cycle's date format.
    public final String text;
    // The file backing this cycle.
    public final File path;
    // Parent directory of path, captured once at construction.
    public final File parentPath;
    // Mutable flag recording whether the path has been seen to exist.
    // NOTE(review): written without synchronisation — confirm single-writer usage.
    public boolean pathExists;

    Resource(long millis, String text, File path) {
        this.millis = millis;
        this.text = text;
        this.path = path;
        this.parentPath = path.getParentFile();
    }
}
} |
try {
TemporalAccessor parse = formatter.parse(name);
if (!parse.isSupported(ChronoField.EPOCH_DAY)) {
final WeekFields weekFields = WeekFields.of(formatter.getLocale());
if (parse.isSupported(weekFields.weekBasedYear()) && parse.isSupported(weekFields.weekOfWeekBasedYear())) {
int year = Math.toIntExact(parse.getLong(weekFields.weekBasedYear()));
int week = Math.toIntExact(parse.getLong(weekFields.weekOfWeekBasedYear()));
LocalDate ld = LocalDate.now()
.withYear(year)
.with(weekFields.weekOfYear(), week)
.with(weekFields.dayOfWeek(), 1);
return Math.toIntExact(ld.toEpochDay());
}
throw new UnsupportedOperationException("Unable to parse " + name + " using format " + format);
}
long epochDay = parse.getLong(ChronoField.EPOCH_DAY) * 86400;
if (parse.isSupported(ChronoField.SECOND_OF_DAY))
epochDay += parse.getLong(ChronoField.SECOND_OF_DAY);
return Maths.toInt32((epochDay - ((epoch) / 1000)) / (length / 1000));
} catch (DateTimeParseException e) {
throw new RuntimeException(String.format(
"Unable to parse %s using format %s", name, format), e);
}
| 1,351 | 407 | 1,758 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/WireStorePool.java | WireStorePool | acquire | class WireStorePool extends SimpleCloseable {
@NotNull
private final WireStoreSupplier supplier;
private final StoreFileListener storeFileListener;
private WireStorePool(@NotNull WireStoreSupplier supplier, StoreFileListener storeFileListener) {
this.supplier = supplier;
this.storeFileListener = storeFileListener;
}
@NotNull
public static WireStorePool withSupplier(@NotNull WireStoreSupplier supplier, StoreFileListener storeFileListener) {
return new WireStorePool(supplier, storeFileListener);
}
@Nullable
public SingleChronicleQueueStore acquire(
final int cycle,
WireStoreSupplier.CreateStrategy createStrategy,
SingleChronicleQueueStore oldStore) {<FILL_FUNCTION_BODY>}
public int nextCycle(final int currentCycle, @NotNull TailerDirection direction) throws ParseException {
throwExceptionIfClosed();
return supplier.nextCycle(currentCycle, direction);
}
/**
 * Releases the given store on the background resource-releaser thread and, if a
 * store-file listener is active, notifies it (also in the background) that the
 * store's file has been released.
 *
 * @param store the store to release; its cycle and file are captured for the callback
 */
public void closeStore(@NotNull SingleChronicleQueueStore store) {
    BackgroundResourceReleaser.release(store);
    if (storeFileListener.isActive())
        BackgroundResourceReleaser.run(() -> storeFileListener.onReleased(store.cycle(), store.file()));
}
/**
* list cycles between ( inclusive )
*
* @param lowerCycle the lower cycle
* @param upperCycle the upper cycle
* @return an array including these cycles and all the intermediate cycles
*/
public NavigableSet<Long> listCyclesBetween(int lowerCycle, int upperCycle) {
throwExceptionIfClosed();
return supplier.cycles(lowerCycle, upperCycle);
}
} |
throwExceptionIfClosed();
// reuse cycle store when applicable
if (oldStore != null && oldStore.cycle() == cycle && !oldStore.isClosed())
return oldStore;
SingleChronicleQueueStore store = this.supplier.acquire(cycle, createStrategy);
if (store != null) {
store.cycle(cycle);
if (store != oldStore && storeFileListener.isActive())
BackgroundResourceReleaser.run(() -> storeFileListener.onAcquired(cycle, store.file()));
}
return store;
| 440 | 148 | 588 | <methods>public final void close() ,public boolean isClosed() <variables>private volatile transient boolean closed |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/FileSystemDirectoryListing.java | FileSystemDirectoryListing | refresh | class FileSystemDirectoryListing extends SimpleCloseable implements DirectoryListing {
private final File queueDir;
private final ToIntFunction<String> fileNameToCycleFunction;
private int minCreatedCycle = Integer.MAX_VALUE;
private int maxCreatedCycle = Integer.MIN_VALUE;
private long lastRefreshTimeMS;
FileSystemDirectoryListing(final File queueDir,
final ToIntFunction<String> fileNameToCycleFunction) {
this.queueDir = queueDir;
this.fileNameToCycleFunction = fileNameToCycleFunction;
}
@Override
public void onFileCreated(final File file, final int cycle) {
onRoll(cycle);
}
@Override
public void refresh(boolean force) {<FILL_FUNCTION_BODY>}
@Override
public long lastRefreshTimeMS() {
return lastRefreshTimeMS;
}
@Override
public int getMinCreatedCycle() {
return minCreatedCycle;
}
@Override
public int getMaxCreatedCycle() {
return maxCreatedCycle;
}
@Override
public long modCount() {
return -1;
}
@Override
public void onRoll(int cycle) {
minCreatedCycle = Math.min(minCreatedCycle, cycle);
maxCreatedCycle = Math.max(maxCreatedCycle, cycle);
}
} |
lastRefreshTimeMS = System.currentTimeMillis();
final String[] fileNamesList = queueDir.list();
String minFilename = INITIAL_MIN_FILENAME;
String maxFilename = INITIAL_MAX_FILENAME;
if (fileNamesList != null) {
for (String fileName : fileNamesList) {
if (fileName.endsWith(SingleChronicleQueue.SUFFIX)) {
if (minFilename.compareTo(fileName) > 0)
minFilename = fileName;
if (maxFilename.compareTo(fileName) < 0)
maxFilename = fileName;
}
}
}
int min = UNSET_MIN_CYCLE;
if (!INITIAL_MIN_FILENAME.equals(minFilename))
min = fileNameToCycleFunction.applyAsInt(minFilename);
int max = UNSET_MAX_CYCLE;
if (!INITIAL_MAX_FILENAME.equals(maxFilename))
max = fileNameToCycleFunction.applyAsInt(maxFilename);
minCreatedCycle = min;
maxCreatedCycle = max;
| 365 | 300 | 665 | <methods>public final void close() ,public boolean isClosed() <variables>private volatile transient boolean closed |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/MicroToucher.java | MicroToucher | bgExecute | class MicroToucher {
private final StoreAppender appender;
private long lastPageTouched = 0;
private volatile long lastPageToSync = 0;
private long lastPageSynced = 0;
public MicroToucher(StoreAppender appender) {
this.appender = appender;
}
/**
 * Pre-touches the memory page immediately after the appender's last write
 * position, so the page fault is taken here rather than on the writer's
 * critical path.
 *
 * @return true if this call attempted to touch a new page, false if the page
 *         was already touched (in which case the last complete page is recorded
 *         for the background sync instead)
 */
public boolean execute() {
    final Wire bufferWire = appender.wire();
    if (bufferWire == null)
        return false;
    final long lastPosition = appender.lastPosition;
    // 0xFFF mask => 4 KiB page granularity. NOTE(review): assumes 4 KiB OS pages.
    final long lastPage = lastPosition & ~0xFFF;
    final long nextPage = (lastPosition + 0xFFF) & ~0xFFF;
    Bytes<?> bytes = bufferWire.bytes();
    if (nextPage != lastPageTouched) {
        lastPageTouched = nextPage;
        try {
            // best effort: only touch if the page lies within the mapped store;
            // any failure here is deliberately ignored.
            final BytesStore bs = bytes.bytesStore();
            if (bs.inside(nextPage, 8))
                touchPage(nextPage, bs);
        } catch (Throwable ignored) {
        }
        return true;
    }
    // Nothing new to touch; remember the last complete page for bgExecute's sync.
    lastPageToSync = lastPage;
    return false;
}
public void bgExecute() {<FILL_FUNCTION_BODY>}
/**
 * Asynchronously flushes the given region to the backing store via
 * {@code msync(..., MS_ASYNC)}. Returns silently if the region is not fully
 * inside the store.
 *
 * @param bytes  the mapped store to flush
 * @param start  start offset of the region
 * @param length length of the region in bytes
 */
private void sync(BytesStore bytes, long start, long length) {
    if (!bytes.inside(start, length))
        return;
    // long a = System.nanoTime();
    PosixAPI.posix().msync(bytes.addressForRead(start), length, MSyncFlag.MS_ASYNC);
    // System.out.println("sync took " + (System.nanoTime() - a) / 1000);
}
protected boolean touchPage(long nextPage, BytesStore bs) {
return bs.compareAndSwapLong(nextPage, 0, 0);
}
} |
final long lastPage = this.lastPageToSync;
final long start = this.lastPageSynced;
final long length = Math.min(8 << 20, lastPage - start);
// System.out.println("len "+length);
if (length < 8 << 20)
return;
final Wire bufferWire = appender.wire();
if (bufferWire == null)
return;
BytesStore bytes = bufferWire.bytes().bytesStore();
sync(bytes, start, length);
this.lastPageSynced += length;
| 479 | 146 | 625 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/ReferenceCountedCache.java | ReferenceCountedCache | releaseResource | class ReferenceCountedCache<K, T extends ReferenceCounted & Closeable, V, E extends Throwable>
extends AbstractCloseable {
private final Map<K, T> cache = new LinkedHashMap<>();
private final Function<T, V> transformer;
private final ThrowingFunction<K, T, E> creator;
private final ReferenceChangeListener referenceChangeListener;
public ReferenceCountedCache(final Function<T, V> transformer,
final ThrowingFunction<K, T, E> creator) {
this.transformer = transformer;
this.creator = creator;
this.referenceChangeListener = new TriggerFlushOnLastReferenceRemoval();
singleThreadedCheckDisabled(true);
}
@NotNull
V get(@NotNull final K key) throws E {
throwExceptionIfClosed();
final V rv;
synchronized (cache) {
@Nullable T value = cache.get(key);
if (value == null) {
value = creator.apply(key);
value.reserveTransfer(INIT, this);
value.addReferenceChangeListener(referenceChangeListener);
//System.err.println("Reserved " + value.toString() + " by " + this);
cache.put(key, value);
}
// this will add to the ref count and so needs to be done inside of sync block
rv = transformer.apply(value);
}
return rv;
}
@Override
protected void performClose() {
synchronized (cache) {
for (T value : cache.values()) {
releaseResource(value);
}
cache.clear();
}
}
private void releaseResource(T value) {<FILL_FUNCTION_BODY>}
public void remove(K key) {
// harmless to call if cache is already closing/closed
synchronized (cache) {
releaseResource(cache.remove(key));
}
}
/**
 * Listener that schedules a background cleanup when an external owner drops its
 * reference, leaving the cache as the sole remaining owner (refCount == 1).
 */
private class TriggerFlushOnLastReferenceRemoval implements ReferenceChangeListener {
    // Reused Runnable to avoid allocating on every reference removal.
    private final Runnable bgCleanup = this::bgCleanup;

    @Override
    public void onReferenceRemoved(ReferenceCounted referenceCounted, ReferenceOwner referenceOwner) {
        // Ignore removals performed by the cache itself; only react when the
        // cache's own reference is the last one left.
        if (referenceOwner != ReferenceCountedCache.this && referenceCounted.refCount() == 1) {
            BackgroundResourceReleaser.run(bgCleanup);
        }
    }

    private void bgCleanup() {
        // remove all which have been de-referenced by other than me. Garbagy but rare
        synchronized (cache) {
            cache.entrySet().removeIf(entry -> {
                T value = entry.getValue();
                int refCount = value.refCount();
                if (refCount == 1) {
                    // Cache holds the last reference: release it so the resource closes.
                    value.release(ReferenceCountedCache.this);
                }
                return refCount <= 1;
            });
        }
    }
}
} |
try {
if (value != null)
value.release(this);
} catch (Exception e) {
Jvm.debug().on(getClass(), e);
}
| 732 | 50 | 782 | <methods>public static void assertCloseablesClosed() ,public final void close() ,public net.openhft.chronicle.core.StackTrace createdHere() ,public static void disableCloseableTracing() ,public static void enableCloseableTracing() ,public static void gcAndWaitForCloseablesToClose() ,public boolean isClosed() ,public boolean isClosing() ,public int referenceId() ,public void singleThreadedCheckDisabled(boolean) ,public void singleThreadedCheckReset() ,public void throwExceptionIfClosed() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public void throwExceptionIfClosedInSetter() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public java.lang.String toString() ,public static void unmonitor(net.openhft.chronicle.core.io.Closeable) ,public static boolean waitForCloseablesToClose(long) ,public void warnAndCloseIfNotClosed() <variables>private static final long CLOSED_OFFSET,protected static final boolean DISABLE_DISCARD_WARNING,private static final int STATE_CLOSED,private static final int STATE_CLOSING,private static final int STATE_NOT_CLOSED,protected static final long WARN_NS,private volatile transient int closed,protected volatile transient net.openhft.chronicle.core.StackTrace closedHere,private final transient net.openhft.chronicle.core.StackTrace createdHere,private final transient net.openhft.chronicle.core.io.AbstractCloseable.Finalizer finalizer,private int referenceId,private transient boolean singleThreadedCheckDisabled,private volatile transient java.lang.Thread usedByThread,private volatile transient net.openhft.chronicle.core.StackTrace usedByThreadHere |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/RollCycleEncodeSequence.java | RollCycleEncodeSequence | getSequence | class RollCycleEncodeSequence implements Sequence {
private final TwoLongValue writePositionAndSequence;
private final int cycleShift;
private final long sequenceMask;
RollCycleEncodeSequence(LongValue writePositionAndSequence, int indexCount, int indexSpacing) {
this.cycleShift = Math.max(32, Maths.intLog2(indexCount) * 2 + Maths.intLog2(indexSpacing));
this.sequenceMask = (1L << cycleShift) - 1;
this.writePositionAndSequence = writePositionAndSequence instanceof TwoLongValue ?
(TwoLongValue) writePositionAndSequence : null;
}
@Override
public void setSequence(long sequence, long position) {
if (writePositionAndSequence == null)
return;
long value = toLongValue(position, sequence);
writePositionAndSequence.setOrderedValue2(value);
}
@Override
public long toIndex(long headerNumber, long sequence) {
long cycle = toLowerBitsWritePosition(headerNumber);
return toLongValue(cycle, sequence);
}
/**
* gets the sequence for a writePosition
* <p>
* This method will only return a valid sequence number of the write position if the write position is the
* last write position in the queue. YOU CAN NOT USE THIS METHOD TO LOOK UP RANDOM SEQUENCES FOR ANY WRITE POSITION.
* NOT_FOUND_RETRY will be return if a sequence number can not be found ( so can retry )
* or NOT_FOUND when you should not retry
*
* @param forWritePosition the last write position, expected to be the end of queue
* @return NOT_FOUND_RETRY if the sequence for this write position can not be found, or NOT_FOUND if sequenceValue==null or the sequence for this {@code writePosition}
*/
public long getSequence(long forWritePosition) {<FILL_FUNCTION_BODY>}
private long toLongValue(long cycle, long sequenceNumber) {
return (cycle << cycleShift) + (sequenceNumber & sequenceMask);
}
public long toSequenceNumber(long index) {
return index & sequenceMask;
}
private long toLowerBitsWritePosition(long index) {
return index >>> cycleShift;
}
/**
 * Returns a diagnostic description of this sequence encoder, including the
 * backing two-long value and the shift/mask used for index encoding.
 */
@Override
public String toString() {
    final StringBuilder description = new StringBuilder("RollCycleEncodeSequence{");
    description.append("writePositionAndSequence=").append(writePositionAndSequence)
            .append(", cycleShift=").append(cycleShift)
            .append(", sequenceMask=").append(sequenceMask)
            .append('}');
    return description.toString();
}
} |
if (writePositionAndSequence == null)
return Sequence.NOT_FOUND;
// We only deal with the 2nd long in the TwoLongValue, and we use it to keep track of current position
// and current sequence. We use the same encoding as index (cycle number is shifted left by cycleShift
// and sequence number occupied the lower 64-cycleShift bits) but for this use case we mask and shift
// position into the space used for cycle number.
// todo optimize the maths in the method below
final long sequenceValue = this.writePositionAndSequence.getVolatileValue2();
if (sequenceValue == 0)
return Sequence.NOT_FOUND;
long writePositionAsCycle = toLongValue(forWritePosition, 0);
long lowerBitsOfWp = toLowerBitsWritePosition(writePositionAsCycle);
final long toLowerBitsWritePosition = toLowerBitsWritePosition(sequenceValue);
if (lowerBitsOfWp == toLowerBitsWritePosition)
return toSequenceNumber(sequenceValue);
return Sequence.NOT_FOUND_RETRY;
| 650 | 279 | 929 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/SCQMeta.java | SCQMeta | overrideFrom | class SCQMeta implements Metadata {
@NotNull
private final SCQRoll roll;
private final int deltaCheckpointInterval;
private int sourceId;
@SuppressWarnings("unused")
@UsedViaReflection
// Deserialising constructor used via Demarshallable reflection.
// roll is mandatory; deltaCheckpointInterval and sourceId were added later, so
// their absence (no remaining bytes) is tolerated for metadata written by older
// versions: -1 means the checkpoint interval is disabled, 0 is the default sourceId.
SCQMeta(@NotNull WireIn wire) {
    this.roll = Objects.requireNonNull(wire.read(MetaDataField.roll).typedMarshallable());
    this.deltaCheckpointInterval = wire.bytes().readRemaining() > 0 ? wire.read(MetaDataField.deltaCheckpointInterval).int32() : -1; // disabled.
    this.sourceId = wire.bytes().readRemaining() > 0 ? wire.read(MetaDataField.sourceId).int32() : 0;
}
SCQMeta(@NotNull SCQRoll roll, int deltaCheckpointInterval, int sourceId) {
this.roll = roll;
this.deltaCheckpointInterval = deltaCheckpointInterval;
this.sourceId = sourceId;
}
@NotNull
public SCQRoll roll() {
return roll;
}
public int deltaCheckpointInterval() {
return deltaCheckpointInterval;
}
public int sourceId() {
return sourceId;
}
@Override
public void writeMarshallable(@NotNull WireOut wire) {
wire
.write(MetaDataField.roll).typedMarshallable(roll)
.write(MetaDataField.deltaCheckpointInterval).int32(this.deltaCheckpointInterval)
.write(MetaDataField.sourceId).int32(this.sourceId);
}
@Override
public <T extends Metadata> void overrideFrom(T metadata) {<FILL_FUNCTION_BODY>}
} |
if (!(metadata instanceof SCQMeta))
throw new IllegalStateException("Expected SCQMeta, got " + metadata.getClass());
SCQMeta other = (SCQMeta) metadata;
SCQRoll roll = other.roll;
if (roll.epoch() != this.roll.epoch()) {
Jvm.warn().on(getClass(), "Overriding roll epoch from existing metadata, was " + this.roll.epoch() + ", overriding to " + roll.epoch());
this.roll.epoch(roll.epoch());
}
if (roll.length() != this.roll.length()) {
Jvm.warn().on(getClass(), "Overriding roll length from existing metadata, was " + this.roll.length() + ", overriding to " + roll.length());
this.roll.length(roll.length());
this.roll.format(roll.format());
}
if (roll.rollTime() != null && !Objects.equals(roll.rollTime(), this.roll.rollTime())) {
Jvm.warn().on(getClass(), "Overriding roll time from existing metadata, was " + this.roll.rollTime() + ", overriding to " + roll.rollTime());
this.roll.rollTime(roll.rollTime());
}
if (roll.rollTimeZone() != null && !Objects.equals(roll.rollTimeZone(), this.roll.rollTimeZone())) {
Jvm.warn().on(getClass(), "Overriding roll time zone from existing metadata, was " + this.roll.rollTimeZone() + ", overriding to " + roll.rollTimeZone());
this.roll.rollTimeZone(roll.rollTimeZone());
}
if (other.sourceId != sourceId) {
Jvm.warn().on(getClass(), "Overriding sourceId from existing metadata, was " + sourceId + ", overriding to " + other.sourceId);
this.sourceId = other.sourceId;
}
| 436 | 493 | 929 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/SCQRoll.java | SCQRoll | toString | class SCQRoll implements Demarshallable, WriteMarshallable {
private int length;
@Nullable
private String format;
@Nullable
private LocalTime rollTime;
@Nullable
private ZoneId rollTimeZone;
private long epoch;
/**
* used by {@link Demarshallable}
*
* @param wire a wire
*/
@UsedViaReflection
private SCQRoll(@NotNull WireIn wire) {
length = wire.read(RollFields.length).int32();
format = wire.read(RollFields.format).text();
epoch = wire.read(RollFields.epoch).int64();
ValueIn rollTimeVIN = wire.read(RollFields.rollTime);
if (rollTimeVIN.hasNext())
rollTime = rollTimeVIN.time();
String zoneId = wire.read(RollFields.rollTimeZone).text();
if (zoneId != null)
rollTimeZone = ZoneId.of(zoneId);
else
rollTimeZone = null;
}
SCQRoll(@NotNull RollCycle rollCycle,
long epoch,
@Nullable LocalTime rollTime,
@Nullable ZoneId rollTimeZone) {
this.length = rollCycle.lengthInMillis();
this.format = rollCycle.format();
this.epoch = epoch;
this.rollTime = rollTime;
this.rollTimeZone = rollTimeZone;
}
@Override
public void writeMarshallable(@NotNull WireOut wire) {
wire.write(RollFields.length).int32(length)
.write(RollFields.format).text(format)
.write(RollFields.epoch).int64(epoch);
if (rollTime != null)
wire.write(RollFields.rollTime).time(rollTime);
if (rollTimeZone != null)
wire.write(RollFields.rollTimeZone).text(rollTimeZone.getId());
}
/**
* @return an epoch offset as the number of number of milliseconds since January 1, 1970,
* 00:00:00 GMT
*/
public long epoch() {
return this.epoch;
}
public String format() {
return this.format;
}
int length() {
return length;
}
@Nullable
public LocalTime rollTime() {
return rollTime;
}
@Nullable
public ZoneId rollTimeZone() {
return rollTimeZone;
}
public void length(int length) {
this.length = length;
}
public void format(@Nullable String format) {
this.format = format;
}
public void rollTime(@Nullable LocalTime rollTime) {
this.rollTime = rollTime;
}
public void rollTimeZone(@Nullable ZoneId rollTimeZone) {
this.rollTimeZone = rollTimeZone;
}
public void epoch(long epoch) {
this.epoch = epoch;
}
@Override
public String toString() {<FILL_FUNCTION_BODY>}
enum RollFields implements WireKey {
length, format, epoch, rollTime, rollTimeZone
}
} |
return "SCQRoll{" +
"length=" + length +
", format='" + format + '\'' +
", epoch=" + epoch +
", rollTime=" + rollTime +
", rollTimeZone=" + rollTimeZone +
'}';
| 843 | 72 | 915 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/TableDirectoryListing.java | TableDirectoryListing | refresh | class TableDirectoryListing extends AbstractCloseable implements DirectoryListing {
private static final String HIGHEST_CREATED_CYCLE = "listing.highestCycle";
private static final String LOWEST_CREATED_CYCLE = "listing.lowestCycle";
private static final String MOD_COUNT = "listing.modCount";
static final int UNSET_MAX_CYCLE = Integer.MIN_VALUE;
static final int UNSET_MIN_CYCLE = Integer.MAX_VALUE;
static final String INITIAL_MIN_FILENAME = Character.toString(Character.MAX_VALUE);
static final String INITIAL_MAX_FILENAME = Character.toString(Character.MIN_VALUE);
private final TableStore<?> tableStore;
private final Path queuePath;
private final ToIntFunction<String> fileNameToCycleFunction;
private volatile LongValue maxCycleValue;
private volatile LongValue minCycleValue;
private volatile LongValue modCount;
private long lastRefreshTimeMS = 0;
TableDirectoryListing(
final @NotNull TableStore<?> tableStore,
final Path queuePath,
final ToIntFunction<String> fileNameToCycleFunction) {
this.tableStore = tableStore;
this.queuePath = queuePath;
this.fileNameToCycleFunction = fileNameToCycleFunction;
checkReadOnly(tableStore);
singleThreadedCheckDisabled(true);
}
protected void checkReadOnly(@NotNull TableStore<?> tableStore) {
if (tableStore.readOnly()) {
throw new IllegalArgumentException(getClass().getSimpleName() + " should only be used for writable queues");
}
}
@Override
public void init() {
throwExceptionIfClosedInSetter();
tableStore.doWithExclusiveLock(ts -> {
initLongValues();
minCycleValue.compareAndSwapValue(Long.MIN_VALUE, UNSET_MIN_CYCLE);
if (modCount.getVolatileValue() == Long.MIN_VALUE) {
modCount.compareAndSwapValue(Long.MIN_VALUE, 0);
}
return this;
});
}
protected void initLongValues() {
maxCycleValue = tableStore.acquireValueFor(HIGHEST_CREATED_CYCLE);
minCycleValue = tableStore.acquireValueFor(LOWEST_CREATED_CYCLE);
modCount = tableStore.acquireValueFor(MOD_COUNT);
}
@Override
public void refresh(final boolean force) {<FILL_FUNCTION_BODY>}
@Override
public void onFileCreated(final File file, final int cycle) {
onRoll(cycle);
}
@Override
public void onRoll(int cycle) {
minCycleValue.setMinValue(cycle);
maxCycleValue.setMaxValue(cycle);
modCount.addAtomicValue(1);
}
@Override
public long lastRefreshTimeMS() {
return lastRefreshTimeMS;
}
@Override
public int getMaxCreatedCycle() {
return getMaxCycleValue();
}
@Override
public int getMinCreatedCycle() {
return getMinCycleValue();
}
@Override
public long modCount() {
return modCount.getVolatileValue();
}
@Override
public String toString() {
return tableStore.dump(WireType.BINARY_LIGHT);
}
protected void performClose() {
Closeable.closeQuietly(minCycleValue, maxCycleValue, modCount);
}
private int getMaxCycleValue() {
return (int) maxCycleValue.getVolatileValue();
}
private int getMinCycleValue() {
return (int) minCycleValue.getVolatileValue();
}
} |
if (!force) {
return;
}
lastRefreshTimeMS = System.currentTimeMillis();
final long currentMin0 = minCycleValue.getVolatileValue();
final long currentMax0 = maxCycleValue.getVolatileValue();
while (true) {
throwExceptionIfClosed();
tableStore.throwExceptionIfClosed();
Jvm.safepoint();
final long currentMax = maxCycleValue.getVolatileValue();
final String[] fileNamesList = queuePath.toFile().list();
String minFilename = INITIAL_MIN_FILENAME;
String maxFilename = INITIAL_MAX_FILENAME;
if (fileNamesList != null) {
for (String fileName : fileNamesList) {
if (fileName.endsWith(SingleChronicleQueue.SUFFIX)) {
if (minFilename.compareTo(fileName) > 0)
minFilename = fileName;
if (maxFilename.compareTo(fileName) < 0)
maxFilename = fileName;
}
}
}
int min = UNSET_MIN_CYCLE;
if (!INITIAL_MIN_FILENAME.equals(minFilename))
min = fileNameToCycleFunction.applyAsInt(minFilename);
int max = UNSET_MAX_CYCLE;
if (!INITIAL_MAX_FILENAME.equals(maxFilename))
max = fileNameToCycleFunction.applyAsInt(maxFilename);
if (currentMin0 == min && currentMax0 == max) {
modCount.addAtomicValue(1);
return;
}
minCycleValue.setOrderedValue(min);
if (maxCycleValue.compareAndSwapValue(currentMax, max)) {
modCount.addAtomicValue(1);
break;
}
Jvm.nanoPause();
}
| 1,006 | 496 | 1,502 | <methods>public static void assertCloseablesClosed() ,public final void close() ,public net.openhft.chronicle.core.StackTrace createdHere() ,public static void disableCloseableTracing() ,public static void enableCloseableTracing() ,public static void gcAndWaitForCloseablesToClose() ,public boolean isClosed() ,public boolean isClosing() ,public int referenceId() ,public void singleThreadedCheckDisabled(boolean) ,public void singleThreadedCheckReset() ,public void throwExceptionIfClosed() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public void throwExceptionIfClosedInSetter() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public java.lang.String toString() ,public static void unmonitor(net.openhft.chronicle.core.io.Closeable) ,public static boolean waitForCloseablesToClose(long) ,public void warnAndCloseIfNotClosed() <variables>private static final long CLOSED_OFFSET,protected static final boolean DISABLE_DISCARD_WARNING,private static final int STATE_CLOSED,private static final int STATE_CLOSING,private static final int STATE_NOT_CLOSED,protected static final long WARN_NS,private volatile transient int closed,protected volatile transient net.openhft.chronicle.core.StackTrace closedHere,private final transient net.openhft.chronicle.core.StackTrace createdHere,private final transient net.openhft.chronicle.core.io.AbstractCloseable.Finalizer finalizer,private int referenceId,private transient boolean singleThreadedCheckDisabled,private volatile transient java.lang.Thread usedByThread,private volatile transient net.openhft.chronicle.core.StackTrace usedByThreadHere |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/TableDirectoryListingReadOnly.java | TableDirectoryListingReadOnly | init | class TableDirectoryListingReadOnly extends TableDirectoryListing {
TableDirectoryListingReadOnly(final @NotNull TableStore<?> tableStore) {
super(tableStore, null, null);
}
@Override
protected void checkReadOnly(@NotNull TableStore<?> tableStore) {
// no-op
}
@Override
public void init() {<FILL_FUNCTION_BODY>}
@Override
public void refresh(final boolean force) {
// no-op
}
@Override
public void onFileCreated(final File file, final int cycle) {
onRoll(cycle);
}
@Override
public void onRoll(int cycle) {
// no-op
}
} |
throwExceptionIfClosedInSetter();
// it is possible if r/o queue created at same time as r/w queue for longValues to be only half-written
final long timeoutMillis = System.currentTimeMillis() + 500;
while (true) {
try {
initLongValues();
break;
} catch (Exception e) {
if (System.currentTimeMillis() > timeoutMillis)
throw e;
Jvm.pause(1);
}
}
| 192 | 131 | 323 | <methods>public int getMaxCreatedCycle() ,public int getMinCreatedCycle() ,public void init() ,public long lastRefreshTimeMS() ,public long modCount() ,public void onFileCreated(java.io.File, int) ,public void onRoll(int) ,public void refresh(boolean) ,public java.lang.String toString() <variables>private static final java.lang.String HIGHEST_CREATED_CYCLE,static final java.lang.String INITIAL_MAX_FILENAME,static final java.lang.String INITIAL_MIN_FILENAME,private static final java.lang.String LOWEST_CREATED_CYCLE,private static final java.lang.String MOD_COUNT,static final int UNSET_MAX_CYCLE,static final int UNSET_MIN_CYCLE,private final non-sealed ToIntFunction<java.lang.String> fileNameToCycleFunction,private long lastRefreshTimeMS,private volatile net.openhft.chronicle.core.values.LongValue maxCycleValue,private volatile net.openhft.chronicle.core.values.LongValue minCycleValue,private volatile net.openhft.chronicle.core.values.LongValue modCount,private final non-sealed java.nio.file.Path queuePath,private final non-sealed TableStore<?> tableStore |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/TableStoreWriteLock.java | TableStoreWriteLock | lockAssertPostConditions | class TableStoreWriteLock extends AbstractTSQueueLock implements WriteLock {
public static final String APPEND_LOCK_KEY = "chronicle.append.lock";
private static final String LOCK_KEY = "chronicle.write.lock";
private final long timeout;
private Thread lockedByThread = null;
private StackTrace lockedHere;
public TableStoreWriteLock(final TableStore<?> tableStore, Supplier<TimingPauser> pauser, Long timeoutMs, final String lockKey) {
super(lockKey, tableStore, pauser);
timeout = timeoutMs;
}
public TableStoreWriteLock(final TableStore<?> tableStore, Supplier<TimingPauser> pauser, Long timeoutMs) {
this(tableStore, pauser, timeoutMs, LOCK_KEY);
}
/**
* Guaranteed to succeed in getting the lock (may involve timeout and recovery) or else throw.
* <p>This is not re-entrant i.e. if you lock and try and lock again it will timeout and recover
*/
@Override
public void lock() {
    throwExceptionIfClosed();
    // Assertion-only re-entrancy check (this lock is not re-entrant).
    assert checkNotAlreadyLocked();
    long currentLockValue = 0;
    TimingPauser tlPauser = pauser.get();
    try {
        currentLockValue = lock.getVolatileValue();
        // Spin (with pauser back-off) until we CAS the lock word from UNLOCKED to our PID.
        while (!lock.compareAndSwapValue(UNLOCKED, PID)) {
            currentLockValue = lockGetCurrentLockValue(tlPauser);
        }
        lockAssertPostConditions();
        // success
    } catch (TimeoutException e) {
        // Timed out waiting: either fail or force-unlock and retry, per the
        // configured forceUnlockOnTimeoutWhen policy.
        handleTimeoutEx(currentLockValue);
    } finally {
        tlPauser.reset();
    }
}
private long lockGetCurrentLockValue(TimingPauser tlPauser) throws TimeoutException {
if (Thread.currentThread().isInterrupted())
throw new InterruptedRuntimeException("Interrupted for the lock file:" + path);
tlPauser.pause(timeout, TimeUnit.MILLISECONDS);
return lock.getVolatileValue();
}
private void lockAssertPostConditions() {<FILL_FUNCTION_BODY>}
private void handleTimeoutEx(long currentLockValue) {
final String lockedBy = getLockedBy(currentLockValue);
final String warningMsg = lockHandleTimeoutExCreateWarningMessage(lockedBy);
if (forceUnlockOnTimeoutWhen == UnlockMode.NEVER)
throw new UnrecoverableTimeoutException(new IllegalStateException(warningMsg + UNLOCK_MAIN_MSG));
else if (forceUnlockOnTimeoutWhen == UnlockMode.LOCKING_PROCESS_DEAD) {
if (forceUnlockIfProcessIsDead())
lock();
else
throw new UnrecoverableTimeoutException(new IllegalStateException(warningMsg + UNLOCK_MAIN_MSG));
} else {
warn().on(getClass(), warningMsg + UNLOCKING_FORCIBLY_MSG);
forceUnlock(currentLockValue);
lock();
}
}
@NotNull
private String lockHandleTimeoutExCreateWarningMessage(String lockedBy) {
return "Couldn't acquire write lock " +
"after " + timeout + " ms " +
"for the lock file:" + path + ". " +
"Lock was held by " + lockedBy;
}
@NotNull
protected String getLockedBy(long value) {
return value == Long.MIN_VALUE ? "unknown" :
value == PID ? "me"
: Long.toString((int) value);
}
private boolean checkNotAlreadyLocked() {
if (!locked())
return true;
if (lockedByThread == null)
return true;
if (lockedByThread == Thread.currentThread())
throw new AssertionError("Lock is already acquired by current thread and is not reentrant - nested document context?", lockedHere);
return true;
}
@Override
public void unlock() {
throwExceptionIfClosed();
if (!lock.compareAndSwapValue(PID, UNLOCKED)) {
long value = lock.getVolatileValue();
if (value == UNLOCKED)
warn().on(getClass(), "Write lock was already unlocked. For the " +
"lock file:" + path);
else
warn().on(getClass(), "Write lock was locked by someone else! For the " +
"lock file:" + path + " " +
"by PID: " + getLockedBy(value));
}
lockedByThread = null;
lockedHere = null;
}
@Override
public boolean locked() {
throwExceptionIfClosed();
return lock.getVolatileValue(UNLOCKED) != UNLOCKED;
}
/**
* Don't use this - for internal use only
*/
public void forceUnlock() {
throwExceptionIfClosed();
if (locked())
forceUnlock(lockedBy());
}
/**
* Don't use this - for internal use only
* Does not warn when force unlocked
*/
@Deprecated(/* to be removed in x.26. No replacement provided - use forceUnlock */)
public void forceUnlockQuietly() {
lock.setValue(UNLOCKED);
}
} |
//noinspection ConstantConditions,AssertWithSideEffects
assert SKIP_ASSERTIONS ||
((lockedByThread = Thread.currentThread()) != null && (lockedHere = new StackTrace()) != null);
| 1,351 | 58 | 1,409 | <methods>public void <init>(java.lang.String, TableStore<?>, Supplier<net.openhft.chronicle.threads.TimingPauser>) ,public boolean forceUnlockIfProcessIsDead() ,public boolean isLockedByCurrentProcess(java.util.function.LongConsumer) ,public long lockedBy() ,public java.lang.String toString() <variables>protected static final long PID,public static final long UNLOCKED,protected static final java.lang.String UNLOCKING_FORCIBLY_MSG,protected static final java.lang.String UNLOCK_MAIN_MSG,protected final non-sealed net.openhft.chronicle.queue.impl.table.UnlockMode forceUnlockOnTimeoutWhen,protected final non-sealed net.openhft.chronicle.core.values.LongValue lock,private final non-sealed java.lang.String lockKey,protected final non-sealed java.io.File path,protected final non-sealed ThreadLocal<net.openhft.chronicle.threads.TimingPauser> pauser,protected final non-sealed TableStore#RAW tableStore |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/ThreadLocalAppender.java | ThreadLocalAppender | acquireThreadLocalAppender | class ThreadLocalAppender {
private ThreadLocalAppender() {
// Intentional no-op
}
/**
* Returns a ExcerptAppender for the given ChronicleQueue that is local to the current Thread.
* <p>
* An Appender can be used to store new excerpts sequentially to the queue.
* <p>
* <b>
* An Appender is <em>NOT thread-safe</em> and, in addition to that, confined to be used <em>by the creating thread only.</em>.
* Sharing an Appender across threads is unsafe and will inevitably lead to errors and unspecified behaviour.
* </b>
* <p>
* This method returns a {@link ThreadLocal} appender, so does not produce any garbage, hence it's safe to simply call
* this method every time an appender is needed.
*
* @return Returns a ExcerptAppender for this ChronicleQueue that is local to the current Thread
* @throws IllegalArgumentException if the queue it is passed is not an instance of {@link SingleChronicleQueue}
*/
public static ExcerptAppender acquireThreadLocalAppender(ChronicleQueue queue) {<FILL_FUNCTION_BODY>}
} |
if (!(queue instanceof SingleChronicleQueue)) {
throw new IllegalArgumentException("acquireThreadLocalAppender only accepts instances of SingleChronicleQueue");
}
SingleChronicleQueue singleChronicleQueue = (SingleChronicleQueue) queue;
return singleChronicleQueue.acquireThreadLocalAppender(singleChronicleQueue);
| 317 | 87 | 404 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/single/namedtailer/IndexUpdaterFactory.java | IndexUpdaterFactory | createIndexUpdater | class IndexUpdaterFactory {
/**
* Create an instance of an {@link IndexUpdater} depending on the values provided.
*/
@Nullable
public static IndexUpdater createIndexUpdater(@Nullable String tailerName, @NotNull SingleChronicleQueue queue) {<FILL_FUNCTION_BODY>}
/**
* An index updater that simply sets the index value on update. No versioning.
*/
public static class StandardIndexUpdater implements IndexUpdater, Closeable {
private final LongValue indexValue;
public StandardIndexUpdater(@NotNull LongValue indexValue) {
this.indexValue = indexValue;
}
@Override
public void close() throws IOException {
closeQuietly(indexValue);
}
@Override
public void update(long index) {
indexValue.setValue(index);
}
@Override
public LongValue index() {
return indexValue;
}
}
/**
* An index updater that increments a version field on every update.
*/
public static class VersionedIndexUpdater implements IndexUpdater, Closeable {
private final TableStoreWriteLock versionIndexLock;
private final LongValue indexValue;
private final LongValue indexVersionValue;
public VersionedIndexUpdater(@NotNull String tailerName,
@NotNull SingleChronicleQueue queue,
@NotNull LongValue indexValue,
@NotNull LongValue indexVersionValue) {
this.versionIndexLock = queue.versionIndexLockForId(tailerName);
this.versionIndexLock.forceUnlockIfProcessIsDead();
this.indexValue = indexValue;
this.indexVersionValue = indexVersionValue;
}
@Override
public void close() throws IOException {
closeQuietly(versionIndexLock, indexValue, indexVersionValue);
}
@Override
public void update(long index) {
try {
versionIndexLock.lock();
indexValue.setVolatileValue(index);
indexVersionValue.addAtomicValue(1);
} finally {
versionIndexLock.unlock();
}
}
@Override
public LongValue index() {
return indexValue;
}
}
} |
if (tailerName == null) {
// A null index updater is used when a plain (unnamed) tailer is in use
// Note this nullness is not ideal and needs to be tackled in a future refactor of StoreTailer
return null;
} else if (tailerName.startsWith(SingleChronicleQueue.REPLICATED_NAMED_TAILER_PREFIX)) {
// Replicated named tailers use an additional version field updated on each index mutation
return new VersionedIndexUpdater(
tailerName,
queue,
queue.indexForId(tailerName),
queue.indexVersionForId(tailerName)
);
} else {
// Normal named tailers use a simple unversioned scheme
return new StandardIndexUpdater(queue.indexForId(tailerName));
}
| 570 | 215 | 785 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/table/AbstractTSQueueLock.java | AbstractTSQueueLock | forceUnlockIfProcessIsDead | class AbstractTSQueueLock extends AbstractCloseable implements Closeable {
protected static final String UNLOCK_MAIN_MSG = ". You can manually unlock with net.openhft.chronicle.queue.main.UnlockMain";
protected static final String UNLOCKING_FORCIBLY_MSG = ". Unlocking forcibly. Note that this feature is designed to recover " +
"if another process died while holding a lock. If the other process is still alive, you may see queue corruption.";
protected static final long PID = getProcessId();
public static final long UNLOCKED = 1L << 63;
protected final UnlockMode forceUnlockOnTimeoutWhen;
protected final LongValue lock;
protected final ThreadLocal<TimingPauser> pauser;
protected final File path;
protected final TableStore tableStore;
private final String lockKey;
public AbstractTSQueueLock(final String lockKey, final TableStore<?> tableStore, final Supplier<TimingPauser> pauserSupplier) {
this.tableStore = tableStore;
this.lock = tableStore.doWithExclusiveLock(ts -> ts.acquireValueFor(lockKey));
this.pauser = ThreadLocal.withInitial(pauserSupplier);
this.path = tableStore.file();
this.lockKey = lockKey;
final boolean dontRecoverLockTimeout = Jvm.getBoolean("queue.dont.recover.lock.timeout");
if (dontRecoverLockTimeout) {
forceUnlockOnTimeoutWhen = UnlockMode.NEVER;
Jvm.warn().on(getClass(), "queue.dont.recover.lock.timeout property is deprecated and will be removed in a future version. " +
"Use queue.force.unlock.mode=NEVER instead");
} else {
forceUnlockOnTimeoutWhen = UnlockMode.valueOf(Jvm.getProperty("queue.force.unlock.mode", UnlockMode.LOCKING_PROCESS_DEAD.name()).toUpperCase());
}
singleThreadedCheckDisabled(true);
}
protected void performClose() {
Closeable.closeQuietly(lock);
}
/**
* will only force unlock if you give it the correct pid
*/
protected void forceUnlock(long value) {
boolean unlocked = lock.compareAndSwapValue(value, UNLOCKED);
Jvm.warn().on(getClass(), "" +
"Forced unlock for the " +
"lock file:" + path + ", " +
"lockKey: " + lockKey + ", " +
"unlocked: " + unlocked,
new StackTrace("Forced unlock"));
}
public boolean isLockedByCurrentProcess(LongConsumer notCurrentProcessConsumer) {
final long pid = this.lock.getVolatileValue();
// mask off thread (if used)
int realPid = (int) pid;
if (realPid == PID)
return true;
notCurrentProcessConsumer.accept(pid);
return false;
}
/**
* forces an unlock only if the process that currently holds the table store lock is no-longer running
*
* @return {@code true} if the lock was already unlocked, It will not release the lock if it is held by this process
* or the process that was holding the lock is no longer running (and we were able to unlock).
* Otherwise {@code false} is returned if the lock is held by this process or another live process.
*/
public boolean forceUnlockIfProcessIsDead() {<FILL_FUNCTION_BODY>}
/**
* @return the pid that had the locked or the returns UNLOCKED if it is not locked
*/
public long lockedBy() {
return lock.getVolatileValue();
}
@Override
public String toString() {
return this.getClass().getSimpleName() + "{" +
"lock=" + lock +
", path=" + path +
", lockKey='" + lockKey + '\'' +
'}';
}
} |
long pid;
for (; ; ) {
pid = this.lock.getVolatileValue();
if (pid == UNLOCKED)
return true;
// mask off thread (if used)
int realPid = (int) pid;
if (!Jvm.isProcessAlive(realPid)) {
Jvm.warn().on(this.getClass(), format("Forced unlocking `%s` in lock file:%s, as this was locked by: %d which is now dead",
lockKey, this.path, realPid));
if (lock.compareAndSwapValue(pid, UNLOCKED))
return true;
} else
break;
}
if (Jvm.isDebugEnabled(this.getClass()))
// don't make this a WARN as this method should only unlock if process is dead or current process.
Jvm.debug().on(this.getClass(), format("Unable to release the lock=%s in the table store file=%s " +
"as it is being held by pid=%d, and this process is still running.", lockKey, path, pid));
return false;
| 1,012 | 283 | 1,295 | <methods>public static void assertCloseablesClosed() ,public final void close() ,public net.openhft.chronicle.core.StackTrace createdHere() ,public static void disableCloseableTracing() ,public static void enableCloseableTracing() ,public static void gcAndWaitForCloseablesToClose() ,public boolean isClosed() ,public boolean isClosing() ,public int referenceId() ,public void singleThreadedCheckDisabled(boolean) ,public void singleThreadedCheckReset() ,public void throwExceptionIfClosed() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public void throwExceptionIfClosedInSetter() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public java.lang.String toString() ,public static void unmonitor(net.openhft.chronicle.core.io.Closeable) ,public static boolean waitForCloseablesToClose(long) ,public void warnAndCloseIfNotClosed() <variables>private static final long CLOSED_OFFSET,protected static final boolean DISABLE_DISCARD_WARNING,private static final int STATE_CLOSED,private static final int STATE_CLOSING,private static final int STATE_NOT_CLOSED,protected static final long WARN_NS,private volatile transient int closed,protected volatile transient net.openhft.chronicle.core.StackTrace closedHere,private final transient net.openhft.chronicle.core.StackTrace createdHere,private final transient net.openhft.chronicle.core.io.AbstractCloseable.Finalizer finalizer,private int referenceId,private transient boolean singleThreadedCheckDisabled,private volatile transient java.lang.Thread usedByThread,private volatile transient net.openhft.chronicle.core.StackTrace usedByThreadHere |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/table/ReadonlyTableStore.java | ReadonlyTableStore | bytes | class ReadonlyTableStore<T extends Metadata> extends AbstractCloseable implements TableStore<T> {
private final T metadata;
public ReadonlyTableStore(T metadata) {
this.metadata = metadata;
singleThreadedCheckDisabled(true);
}
@Override
public T metadata() {
return metadata;
}
@Override
protected void performClose() {
}
@Override
public LongValue acquireValueFor(CharSequence key, long defaultValue) {
throw new UnsupportedOperationException("Read only");
}
@Override
public <T> void forEachKey(T accumulator, TableStoreIterator<T> tsIterator) {
throw new UnsupportedOperationException("Read only");
}
@Override
public <R> R doWithExclusiveLock(Function<TableStore<T>, ? extends R> code) {
UnsupportedOperationException read_only = new UnsupportedOperationException("Read only");
throw read_only;
}
@Nullable
@Override
public File file() {
throwExceptionIfClosed();
UnsupportedOperationException read_only = new UnsupportedOperationException("Read only");
throw read_only;
}
@NotNull
@Override
public MappedBytes bytes() {<FILL_FUNCTION_BODY>}
@NotNull
@Override
public String dump() {
return metadata.toString();
}
@Override
public String dump(WireType wireType) {
return metadata.toString();
}
@NotNull
@Override
public String shortDump() {
return metadata.toString();
}
@Override
public void writeMarshallable(@NotNull WireOut wire) {
UnsupportedOperationException read_only = new UnsupportedOperationException("Read only");
throw read_only;
}
@Override
public boolean readOnly() {
return true;
}
} |
throwExceptionIfClosed();
UnsupportedOperationException read_only = new UnsupportedOperationException("Read only");
throw read_only;
| 487 | 38 | 525 | <methods>public static void assertCloseablesClosed() ,public final void close() ,public net.openhft.chronicle.core.StackTrace createdHere() ,public static void disableCloseableTracing() ,public static void enableCloseableTracing() ,public static void gcAndWaitForCloseablesToClose() ,public boolean isClosed() ,public boolean isClosing() ,public int referenceId() ,public void singleThreadedCheckDisabled(boolean) ,public void singleThreadedCheckReset() ,public void throwExceptionIfClosed() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public void throwExceptionIfClosedInSetter() throws net.openhft.chronicle.core.io.ClosedIllegalStateException, net.openhft.chronicle.core.io.ThreadingIllegalStateException,public java.lang.String toString() ,public static void unmonitor(net.openhft.chronicle.core.io.Closeable) ,public static boolean waitForCloseablesToClose(long) ,public void warnAndCloseIfNotClosed() <variables>private static final long CLOSED_OFFSET,protected static final boolean DISABLE_DISCARD_WARNING,private static final int STATE_CLOSED,private static final int STATE_CLOSING,private static final int STATE_NOT_CLOSED,protected static final long WARN_NS,private volatile transient int closed,protected volatile transient net.openhft.chronicle.core.StackTrace closedHere,private final transient net.openhft.chronicle.core.StackTrace createdHere,private final transient net.openhft.chronicle.core.io.AbstractCloseable.Finalizer finalizer,private int referenceId,private transient boolean singleThreadedCheckDisabled,private volatile transient java.lang.Thread usedByThread,private volatile transient net.openhft.chronicle.core.StackTrace usedByThreadHere |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/impl/table/SingleTableBuilder.java | SingleTableBuilder | builder | class SingleTableBuilder<T extends Metadata> implements Builder<TableStore<T>> {
static {
CLASS_ALIASES.addAlias(WireType.class);
CLASS_ALIASES.addAlias(SingleTableStore.class, "STStore");
}
@NotNull
private final File file;
@NotNull
private final T metadata;
private WireType wireType;
private boolean readOnly;
private SingleTableBuilder(@NotNull File path, @NotNull T metadata) {
this.file = path;
this.metadata = metadata;
}
@NotNull
public static <T extends Metadata> SingleTableBuilder<T> builder(@NotNull File file, @NotNull WireType wireType, @NotNull T metadata) {<FILL_FUNCTION_BODY>}
@NotNull
public static <T extends Metadata> SingleTableBuilder<T> binary(@NotNull Path path, @NotNull T metadata) {
return binary(path.toFile(), metadata);
}
@NotNull
public static <T extends Metadata> SingleTableBuilder<T> binary(@NotNull String file, @NotNull T metadata) {
return binary(new File(file), metadata);
}
@NotNull
public static <T extends Metadata> SingleTableBuilder<T> binary(@NotNull File basePathFile, @NotNull T metadata) {
return builder(basePathFile, WireType.BINARY_LIGHT, metadata);
}
// *************************************************************************
//
// *************************************************************************
@NotNull
public TableStore<T> build() {
if (readOnly) {
if (!file.exists())
throw new IORuntimeException("Metadata file not found in readOnly mode");
// Wait a short time for the file to be initialized
TimingPauser pauser = Pauser.balanced();
try {
while (file.length() < OS.mapAlignment()) {
pauser.pause(1, TimeUnit.SECONDS);
}
} catch (TimeoutException e) {
throw new IORuntimeException("Metadata file found in readOnly mode, but not initialized yet");
}
}
MappedBytes bytes = null;
try {
if (!readOnly && file.createNewFile() && !file.canWrite()) {
throw new IllegalStateException("Cannot write to tablestore file " + file);
}
bytes = MappedBytes.mappedBytes(file, OS.SAFE_PAGE_SIZE, OS.SAFE_PAGE_SIZE, readOnly);
// these MappedBytes are shared, but the assumption is they shouldn't grow. Supports 2K entries.
bytes.singleThreadedCheckDisabled(true);
// eagerly initialize backing MappedFile page - otherwise wire.writeFirstHeader() will try to lock the file
// to allocate the first byte store and that will cause lock overlap
bytes.readVolatileInt(0);
Wire wire = wireType.apply(bytes);
if (readOnly)
return SingleTableStore.doWithSharedLock(file, v -> {
try {
return readTableStore(wire);
} catch (IOException ex) {
throw Jvm.rethrow(ex);
}
}, () -> null);
else {
MappedBytes finalBytes = bytes;
return SingleTableStore.doWithExclusiveLock(file, v -> {
try {
if (wire.writeFirstHeader()) {
return writeTableStore(finalBytes, wire);
} else {
return readTableStore(wire);
}
} catch (IOException ex) {
throw Jvm.rethrow(ex);
}
}, () -> null);
}
} catch (IOException e) {
throw new IORuntimeException("file=" + file.getAbsolutePath(), e);
} finally {
if (bytes != null)
bytes.singleThreadedCheckReset();
}
}
@NotNull
private TableStore<T> readTableStore(Wire wire) throws StreamCorruptedException {
wire.readFirstHeader();
final ValueIn valueIn = readTableStoreValue(wire);
@NotNull TableStore<T> existing = Objects.requireNonNull(valueIn.typedMarshallable());
metadata.overrideFrom(existing.metadata());
return existing;
}
private ValueIn readTableStoreValue(@NotNull Wire wire) throws StreamCorruptedException {
try (ScopedResource<StringBuilder> stlSb = Wires.acquireStringBuilderScoped()) {
StringBuilder name = stlSb.get();
ValueIn valueIn = wire.readEventName(name);
if (!StringUtils.isEqual(name, MetaDataKeys.header.name())) {
throw new StreamCorruptedException("The first message should be the header, was " + name);
}
return valueIn;
}
}
@NotNull
private TableStore<T> writeTableStore(MappedBytes bytes, Wire wire) {
TableStore<T> store = new SingleTableStore<>(wireType, bytes, metadata);
wire.writeEventName("header").object(store);
wire.updateFirstHeader();
return store;
}
@NotNull
@Override
@Deprecated(/* to be removed in x.26*/)
public SingleTableBuilder<T> clone() {
try {
@SuppressWarnings("unchecked")
SingleTableBuilder<T> clone = (SingleTableBuilder) super.clone();
return clone;
} catch (CloneNotSupportedException e) {
throw new AssertionError(e);
}
}
@NotNull
public File file() {
return file;
}
public WireType wireType() {
return wireType;
}
public SingleTableBuilder<T> wireType(WireType wireType) {
this.wireType = wireType;
return this;
}
public boolean readOnly() {
return readOnly;
}
public SingleTableBuilder<T> readOnly(boolean readOnly) {
this.readOnly = readOnly;
return this;
}
} |
if (file.isDirectory()) {
throw new IllegalArgumentException("Tables should be configured with the table file, not a directory. Actual file used: " + file.getParentFile());
}
if (!file.getName().endsWith(SingleTableStore.SUFFIX)) {
throw new IllegalArgumentException("Invalid file type: " + file.getName());
}
return new SingleTableBuilder<>(file, metadata).wireType(wireType);
| 1,528 | 110 | 1,638 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/domestic/QueueOffsetSpec.java | QueueOffsetSpec | apply | class QueueOffsetSpec {
private static final String TOKEN_DELIMITER = ";";
private final Type type;
private final String[] spec;
private QueueOffsetSpec(final Type type, final String[] spec) {
this.type = type;
this.spec = spec;
}
public static QueueOffsetSpec ofEpoch(final long epoch) {
return new QueueOffsetSpec(Type.EPOCH, new String[]{Long.toString(epoch)});
}
public static QueueOffsetSpec ofRollTime(@NotNull final LocalTime time, @NotNull final ZoneId zoneId) {
return new QueueOffsetSpec(Type.ROLL_TIME, new String[]{time.toString(), zoneId.toString()});
}
public static QueueOffsetSpec ofNone() {
return new QueueOffsetSpec(Type.NONE, new String[]{});
}
public static QueueOffsetSpec parse(@NotNull final String definition) {
final String[] tokens = definition.split(TOKEN_DELIMITER);
final Type type = Type.valueOf(tokens[0]);
switch (type) {
case EPOCH:
expectArgs(tokens, 2);
return new QueueOffsetSpec(type, new String[]{tokens[1]});
case ROLL_TIME:
expectArgs(tokens, 3);
return new QueueOffsetSpec(type, new String[]{tokens[1], tokens[2]});
case NONE:
expectArgs(tokens, 1);
return new QueueOffsetSpec(type, new String[]{});
default:
throw new IllegalArgumentException("Unknown type: " + type);
}
}
public static String formatEpochOffset(final long epochOffset) {
return String.format("%s;%s", Type.EPOCH.name(), epochOffset);
}
public static String formatRollTime(final LocalTime time, final ZoneId zoneId) {
return String.format("%s;%s;%s", Type.ROLL_TIME.name(), time.toString(), zoneId.toString());
}
public static String formatNone() {
return Type.NONE.name();
}
private static ZoneId toZoneId(final String zoneId) {
return ZoneId.of(zoneId);
}
private static LocalTime toLocalTime(final String timestamp) {
return LocalTime.parse(timestamp);
}
private static void expectArgs(final String[] tokens, final int expectedLength) {
if (tokens.length != expectedLength) {
throw new IllegalArgumentException("Expected " + expectedLength + " tokens in " + Arrays.toString(tokens));
}
}
public void apply(final SingleChronicleQueueBuilder builder) {<FILL_FUNCTION_BODY>}
public String format() {
return type.name() + TOKEN_DELIMITER + type.argFormatter.apply(spec);
}
public void validate() {
switch (type) {
case EPOCH:
Long.parseLong(spec[0]);
break;
case ROLL_TIME:
toLocalTime(spec[0]);
toZoneId(spec[1]);
break;
case NONE:
break;
default:
throw new IllegalArgumentException("Unknown type: " + type);
}
}
public enum Type {
EPOCH(args -> args[0]),
ROLL_TIME(args -> args[0] + TOKEN_DELIMITER + args[1]),
NONE(args -> "");
private final Function<String[], String> argFormatter;
Type(final Function<String[], String> argFormatter) {
this.argFormatter = argFormatter;
}
}
} |
switch (type) {
case EPOCH:
builder.epoch(Long.parseLong(spec[0]));
break;
case ROLL_TIME:
builder.rollTime(toLocalTime(spec[0]), toZoneId(spec[1]));
break;
case NONE:
break;
default:
throw new IllegalArgumentException("Unknown type: " + type);
}
| 951 | 106 | 1,057 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/main/InternalBenchmarkMain.java | InternalBenchmarkMain | benchmark | class InternalBenchmarkMain {
static volatile boolean running = true;
static int throughput = Integer.getInteger("throughput", 250); // MB/s
static int runtime = Integer.getInteger("runtime", 300); // seconds
static String basePath = System.getProperty("path", OS.TMP);
static volatile long readerLoopTime = 0;
static volatile long readerEndLoopTime = 0;
static int counter = 0;
static {
System.setProperty("jvm.safepoint.enabled", "true");
}
public static void main(String[] args) {
System.out.println(
"-Dthroughput=" + throughput
+ " -Druntime=" + runtime
+ " -Dpath=" + basePath);
MappedFile.warmup();
System.out.println("Warming up");
benchmark(128);
System.out.println("Warmed up");
for (int size = 64; size <= 16 << 20; size *= 4) {
benchmark(size);
}
}
static void benchmark(int messageSize) {<FILL_FUNCTION_BODY>}
private static void runInner(Histogram transportTime, Histogram readTime, ExcerptTailer tailer) {
Jvm.safepoint();
/*if (tailer.peekDocument()) {
if (counter++ < 1000) {
Jvm.safepoint();
return;
}
}*/
if (counter > 0)
Jvm.safepoint();
else
Jvm.safepoint();
counter = 0;
try (DocumentContext dc = tailer.readingDocument(false)) {
Jvm.safepoint();
if (!dc.isPresent()) {
return;
}
long transport = System.nanoTime();
Jvm.safepoint();
Wire wire = dc.wire();
Bytes<?> bytes = wire.bytes();
long start = readMessage(bytes);
long end = System.nanoTime();
transportTime.sample((double) (transport - start));
readTime.sample((double) (end - transport));
}
Jvm.safepoint();
}
@NotNull
private static ChronicleQueue createQueue(String path) {
return ChronicleQueue.singleBuilder(path)
.blockSize(1 << 30)
.pauserSupplier(Pauser::timedBusy)
.build();
}
private static long readMessage(Bytes<?> bytes) {
Jvm.safepoint();
long start = bytes.readLong();
long rp = bytes.readPosition();
long rl = bytes.readLimit();
long addr = bytes.addressForRead(rp);
long addrEnd = bytes.addressForRead(rl);
Memory memory = OS.memory();
for (addr += 8; addr + 7 < addrEnd; addr += 8)
memory.readLong(addr);
Jvm.safepoint();
return start;
}
private static void writeMessage(Wire wire, int messageSize) {
Bytes<?> bytes = wire.bytes();
long wp = bytes.writePosition();
long addr = bytes.addressForWrite(wp);
Memory memory = OS.memory();
for (int i = 0; i < messageSize; i += 16) {
memory.writeLong(addr + i, 0L);
memory.writeLong(addr + i + 8, 0L);
}
bytes.writeSkip(messageSize);
bytes.writeLong(wp, System.nanoTime());
}
} |
Histogram writeTime = new Histogram(32, 7);
Histogram transportTime = new Histogram(32, 7);
Histogram readTime = new Histogram(32, 7);
String path = basePath + "/test-q-" + messageSize;
ChronicleQueue queue = createQueue(path);
// Pretoucher will only work with Queue Enterprise in the path
Thread pretoucher = new Thread(() -> {
try (ExcerptAppender appender = queue.createAppender()) {
Thread thread = Thread.currentThread();
while (!thread.isInterrupted()) {
appender.pretouch();
Jvm.pause(10);
}
}
});
pretoucher.setDaemon(true);
pretoucher.start();
Histogram loopTime = new Histogram();
Thread reader = new Thread(() -> {
// try (ChronicleQueue queue2 = createQueue(path))
ExcerptTailer tailer = queue.createTailer().toEnd();
long endLoop = System.nanoTime();
while (running) {
loopTime.sample((double) (System.nanoTime() - endLoop));
Jvm.safepoint();
// readerLoopTime = System.nanoTime();
// if (readerLoopTime - readerEndLoopTime > 1000)
// System.out.println("r " + (readerLoopTime - readerEndLoopTime));
// try {
runInner(transportTime, readTime, tailer);
runInner(transportTime, readTime, tailer);
runInner(transportTime, readTime, tailer);
runInner(transportTime, readTime, tailer);
// } finally {
// readerEndLoopTime = System.nanoTime();
// }
Jvm.safepoint();
endLoop = System.nanoTime();
}
});
reader.start();
Jvm.pause(250); // give the reader time to start
long next = System.nanoTime();
long end = (long) (next + runtime * 1e9);
ExcerptAppender appender = queue.createAppender();
while (end > System.nanoTime()) {
long start = System.nanoTime();
try (DocumentContext dc = appender.writingDocument(false)) {
writeMessage(dc.wire(), messageSize);
}
long written = System.nanoTime();
long time = written - start;
// System.out.println(time);
writeTime.sample(time);
long diff = writeTime.totalCount() - readTime.totalCount();
Thread.yield();
if (diff >= 200) {
// long rlt = readerLoopTime;
// long delay = System.nanoTime() - rlt;
System.out.println("diff=" + diff /* +" delay= " + delay*/);
StringBuilder sb = new StringBuilder();
sb.append("Reader: profile of the thread");
Jvm.trimStackTrace(sb, reader.getStackTrace());
System.out.println(sb);
}
next += messageSize * 1e9 / (throughput * 1e6);
long delay = next - System.nanoTime();
if (delay > 0)
LockSupport.parkNanos(delay);
}
while (readTime.totalCount() < writeTime.totalCount())
Jvm.pause(50);
pretoucher.interrupt();
reader.interrupt();
running = false;
// monitor.interrupt();
System.out.println("Loop times " + loopTime.toMicrosFormat());
System.out.println("messageSize " + messageSize);
System.out.println("messages " + writeTime.totalCount());
System.out.println("write histogram: " + writeTime.toMicrosFormat());
System.out.println("transport histogram: " + transportTime.toMicrosFormat());
System.out.println("read histogram: " + readTime.toMicrosFormat());
IOTools.deleteDirWithFiles(path, 2);
Jvm.pause(1000);
| 933 | 1,060 | 1,993 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/main/InternalDumpMain.java | InternalDumpMain | dump | class InternalDumpMain {
private static final String FILE = System.getProperty("file");
private static final boolean SKIP_TABLE_STORE = Jvm.getBoolean("skipTableStoreDump");
private static final boolean UNALIGNED = Jvm.getBoolean("dumpUnaligned");
private static final int LENGTH = ", 0".length();
static {
SingleChronicleQueueBuilder.addAliases();
}
public static void main(String[] args) throws FileNotFoundException {
dump(args[0]);
}
public static void dump(@NotNull String path) throws FileNotFoundException {
File path2 = new File(path);
PrintStream out = FILE == null ? System.out : new PrintStream(FILE);
long upperLimit = Long.MAX_VALUE;
dump(path2, out, upperLimit);
}
public static void dump(@NotNull File path, @NotNull PrintStream out, long upperLimit) {<FILL_FUNCTION_BODY>}
private static void dumpFile(@NotNull File file, @NotNull PrintStream out, long upperLimit) {
Bytes<ByteBuffer> buffer = Bytes.elasticByteBuffer();
try (MappedBytes bytes = MappedBytes.mappedBytes(file, 4 << 20, OS.pageSize(), !OS.isWindows())) {
bytes.readLimit(bytes.realCapacity());
StringBuilder sb = new StringBuilder();
WireDumper dumper = WireDumper.of(bytes, !UNALIGNED);
while (bytes.readRemaining() >= 4) {
sb.setLength(0);
boolean last = dumper.dumpOne(sb, buffer);
if (sb.indexOf("\nindex2index:") != -1 || sb.indexOf("\nindex:") != -1) {
// truncate trailing zeros
if (sb.indexOf(", 0\n]\n") == sb.length() - 6) {
int i = indexOfLastZero(sb);
if (i < sb.length())
sb.setLength(i - 5);
sb.append(" # truncated trailing zeros\n]");
}
}
out.println(sb);
if (last)
break;
if (bytes.readPosition() > upperLimit) {
out.println("# limit reached.");
return;
}
}
} catch (IOException ioe) {
err.println("Failed to read " + file + " " + ioe);
} finally {
buffer.releaseLast();
}
}
private static int indexOfLastZero(@NotNull CharSequence str) {
int i = str.length() - 3;
do {
i -= LENGTH;
CharSequence charSequence = str.subSequence(i, i + 3);
if (!", 0".contentEquals(charSequence))
return i + LENGTH;
} while (i > 3);
return 0;
}
} |
if (path.isDirectory()) {
final FilenameFilter filter =
SKIP_TABLE_STORE
? (d, n) -> n.endsWith(SingleChronicleQueue.SUFFIX)
: (d, n) -> n.endsWith(SingleChronicleQueue.SUFFIX) || n.endsWith(SingleTableStore.SUFFIX);
File[] files = path.listFiles(filter);
if (files == null) {
err.println("Directory not found " + path);
System.exit(1);
}
Arrays.sort(files);
for (File file : files) {
out.println("## " + file);
dumpFile(file, out, upperLimit);
}
} else if (path.getName().endsWith(SingleChronicleQueue.SUFFIX) || path.getName().endsWith(SingleTableStore.SUFFIX)) {
dumpFile(path, out, upperLimit);
}
| 740 | 243 | 983 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/main/InternalPingPongMain.java | InternalPingPongMain | pingPong | class InternalPingPongMain {
// static int throughput = Integer.getInteger("throughput", 250); // MB/s
static int runtime = Integer.getInteger("runtime", 30); // seconds
static String basePath = System.getProperty("path", OS.TMP);
static AtomicLong writeTime = new AtomicLong();
static AtomicInteger writeCount = new AtomicInteger();
static AtomicInteger readCount = new AtomicInteger();
static AtomicBoolean running = new AtomicBoolean(true);
static {
System.setProperty("jvm.safepoint.enabled", "true");
}
public static void main(String[] args) {
System.out.println(
// "-Dthroughput=" + throughput
" -Druntime=" + runtime
+ " -Dpath=" + basePath);
MappedFile.warmup();
pingPong(64);
}
static void pingPong(int size) {<FILL_FUNCTION_BODY>}
@NotNull
private static ChronicleQueue createQueue(String path) {
return ChronicleQueue.single(path);
}
} |
String path = InternalPingPongMain.basePath + "/test-q-" + Time.uniqueId();
Histogram readDelay = new Histogram();
Histogram readDelay2 = new Histogram();
try (ChronicleQueue queue = createQueue(path)) {
Thread reader = new Thread(() -> {
ExcerptTailer tailer = queue.createTailer();
while (running.get()) {
//noinspection StatementWithEmptyBody
while (readCount.get() == writeCount.get()) ;
long wakeTime = System.nanoTime();
while (running.get()) {
try (DocumentContext dc = tailer.readingDocument(true)) {
if (!dc.isPresent())
continue;
}
break;
}
final long delay = wakeTime - writeTime.get();
final long time = System.nanoTime() - wakeTime;
readDelay2.sample(time);
readDelay.sample(delay);
if (time + delay > 20_000)
System.out.println("td " + delay + " + " + time);
if (readCount.get() == 100000) {
System.out.println("reset");
readDelay.reset();
readDelay2.reset();
}
readCount.incrementAndGet();
}
});
reader.setDaemon(true);
reader.start();
Jvm.pause(100);
final long finish = System.currentTimeMillis() + runtime * 1000L;
final ExcerptAppender appender = queue.createAppender();
while (System.currentTimeMillis() < finish) {
if (readCount.get() < writeCount.get()) {
Thread.yield();
continue;
}
try (DocumentContext dc = appender.writingDocument(false)) {
dc.wire().bytes().writeSkip(size);
}
writeCount.incrementAndGet();
writeTime.set(System.nanoTime());
}
running.set(false);
}
System.out.println("read delay: " + readDelay.toMicrosFormat());
System.out.println("read delay2: " + readDelay2.toMicrosFormat());
IOTools.deleteDirWithFiles(path, 2);
| 296 | 589 | 885 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/main/InternalRemovableRollFileCandidatesMain.java | InternalRemovableRollFileCandidatesMain | main | class InternalRemovableRollFileCandidatesMain {
/**
* Produces a list of removable roll file candidates and prints
* their absolute path to standard out row-by-row.
*
* @param args the directory. If no directory is given, "." is assumed
*/
public static void main(String[] args) {<FILL_FUNCTION_BODY>}
} |
final File dir;
if (args.length == 0) {
dir = new File(".");
} else {
dir = new File(args[0]);
}
FileUtil.removableRollFileCandidates(dir)
.map(File::getAbsolutePath)
.forEach(System.out::println);
| 98 | 87 | 185 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/main/InternalUnlockMain.java | InternalUnlockMain | unlock | class InternalUnlockMain {
static {
SingleChronicleQueueBuilder.addAliases();
}
public static void main(String[] args) {
unlock(args[0]);
}
private static void unlock(@NotNull String dir) {<FILL_FUNCTION_BODY>}
} |
File path = new File(dir);
if (!path.isDirectory()) {
System.err.println("Path argument must be a queue directory");
System.exit(1);
}
File storeFilePath = new File(path, QUEUE_METADATA_FILE);
if (!storeFilePath.exists()) {
System.err.println("Metadata file not found, nothing to unlock");
System.exit(1);
}
final TableStore<?> store = SingleTableBuilder.binary(storeFilePath, Metadata.NoMeta.INSTANCE).readOnly(false).build();
// appender lock
(new TableStoreWriteLock(store, BusyTimedPauser::new, 0L, TableStoreWriteLock.APPEND_LOCK_KEY)).forceUnlock();
// write lock
(new TableStoreWriteLock(store, BusyTimedPauser::new, 0L)).forceUnlock();
System.out.println("Done");
| 78 | 245 | 323 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/reader/InternalDummyMethodReaderQueueEntryHandler.java | InternalDummyMethodReaderQueueEntryHandler | accept | class InternalDummyMethodReaderQueueEntryHandler implements QueueEntryHandler {
private final Bytes<?> textConversionTarget = Bytes.allocateElasticOnHeap();
private final WireType wireType;
public InternalDummyMethodReaderQueueEntryHandler(@NotNull WireType wireType) {
this.wireType = requireNonNull(wireType);
}
@Override
public void accept(final WireIn wireIn, final Consumer<String> messageHandler) {<FILL_FUNCTION_BODY>}
@Override
public void close() {
textConversionTarget.releaseLast();
}
} |
long elementCount = 0;
while (wireIn.hasMore()) {
new BinaryWire(wireIn.bytes()).copyOne(wireType.apply(textConversionTarget));
elementCount++;
if ((elementCount & 1) == 0) {
messageHandler.accept(textConversionTarget.toString());
textConversionTarget.clear();
}
}
| 156 | 98 | 254 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/reader/InternalMessageToTextQueueEntryHandler.java | InternalMessageToTextQueueEntryHandler | accept | class InternalMessageToTextQueueEntryHandler implements QueueEntryHandler {
private final Bytes<?> textConversionTarget = Bytes.allocateElasticOnHeap();
private final WireType wireType;
public InternalMessageToTextQueueEntryHandler(WireType wireType) {
this.wireType = requireNonNull(wireType);
}
private static boolean isBinaryFormat(final byte dataFormatIndicator) {
return dataFormatIndicator < 0;
}
@Override
public void accept(final WireIn wireIn, final Consumer<String> messageHandler) {<FILL_FUNCTION_BODY>}
@Override
public void close() {
textConversionTarget.releaseLast();
}
} |
final Bytes<?> serialisedMessage = wireIn.bytes();
final byte dataFormatIndicator = serialisedMessage.readByte(serialisedMessage.readPosition());
String text;
if (isBinaryFormat(dataFormatIndicator)) {
textConversionTarget.clear();
final BinaryWire binaryWire = new BinaryWire(serialisedMessage);
binaryWire.copyTo(wireType.apply(textConversionTarget));
text = textConversionTarget.toString();
} else {
text = serialisedMessage.toString();
}
messageHandler.accept(text);
| 186 | 153 | 339 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/reader/MessageCountingMessageConsumer.java | MessageCountingMessageConsumer | consume | class MessageCountingMessageConsumer implements MessageConsumer {
private final long matchLimit;
private final MessageConsumer wrappedConsumer;
private long matches = 0;
/**
* Constructor
*
* @param matchLimit The limit used to determine {@link #matchLimitReached()}
* @param wrappedConsumer The downstream consumer to pass messages to
*/
public MessageCountingMessageConsumer(long matchLimit, MessageConsumer wrappedConsumer) {
this.matchLimit = matchLimit;
this.wrappedConsumer = wrappedConsumer;
}
@Override
public boolean consume(long index, String message) {<FILL_FUNCTION_BODY>}
public boolean matchLimitReached() {
return matchLimit > 0 && matches >= matchLimit;
}
} |
final boolean consume = wrappedConsumer.consume(index, message);
if (consume) {
matches++;
}
return consume;
| 190 | 40 | 230 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/reader/PatternFilterMessageConsumer.java | PatternFilterMessageConsumer | consume | class PatternFilterMessageConsumer implements MessageConsumer {
private final List<Pattern> patterns;
private final boolean shouldBePresent;
private final MessageConsumer nextMessageConsumer;
/**
* Constructor
*
* @param patterns The list of patterns to match against
* @param shouldBePresent true if we require all the patterns to match, false if we require none of the patterns to match
* @param nextMessageConsumer The next message consumer in line, messages that pass the filter will be passed to it
*/
public PatternFilterMessageConsumer(List<Pattern> patterns, boolean shouldBePresent, MessageConsumer nextMessageConsumer) {
this.patterns = patterns;
this.shouldBePresent = shouldBePresent;
this.nextMessageConsumer = nextMessageConsumer;
}
@Override
public boolean consume(long index, String message) {<FILL_FUNCTION_BODY>}
} |
for (Pattern pattern : patterns) {
if (shouldBePresent != pattern.matcher(message).find()) {
return false;
}
}
return nextMessageConsumer.consume(index, message);
| 219 | 57 | 276 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/reader/queueentryreaders/AbstractTailerPollingQueueEntryReader.java | AbstractTailerPollingQueueEntryReader | read | class AbstractTailerPollingQueueEntryReader implements QueueEntryReader {
private final ExcerptTailer tailer;
private final Function<ExcerptTailer, DocumentContext> pollMethod;
protected AbstractTailerPollingQueueEntryReader(ExcerptTailer tailer, Function<ExcerptTailer, DocumentContext> pollMethod) {
this.tailer = tailer;
this.pollMethod = pollMethod;
}
@Override
public final boolean read() {<FILL_FUNCTION_BODY>}
protected abstract void doRead(DocumentContext documentContext);
} |
try (DocumentContext dc = pollMethod.apply(tailer)) {
if (!dc.isPresent()) {
return false;
}
doRead(dc);
return true;
}
| 154 | 54 | 208 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/reader/queueentryreaders/MethodReaderQueueEntryReader.java | MethodReaderQueueEntryReader | read | class MethodReaderQueueEntryReader implements QueueEntryReader {
private final ExcerptTailer tailer;
private final MessageConsumer messageConsumer;
private final MethodReader methodReader;
private final Bytes<ByteBuffer> bytes;
public MethodReaderQueueEntryReader(ExcerptTailer tailer, MessageConsumer messageConsumer, WireType wireType, Class<?> methodReaderInterface, boolean showMessageHistory) {
this.tailer = tailer;
this.messageConsumer = messageConsumer;
bytes = Bytes.elasticHeapByteBuffer(256);
Wire wire = wireType.apply(bytes);
if (wire instanceof TextWire)
((TextWire) wire).useTextDocuments();
MethodWriterBuilder<?> mwb = wire.methodWriterBuilder(methodReaderInterface);
if (showMessageHistory)
mwb.updateInterceptor((methodName, t) -> {
MessageHistory messageHistory = MessageHistory.get();
// this is an attempt to recognise that no MH was read and instead the method reader called reset(...) on it
if (messageHistory.sources() != 1 || messageHistory.timings() != 1)
bytes.append(messageHistory + System.lineSeparator());
return true;
});
methodReader = tailer.methodReader(mwb.build());
}
@Override
public boolean read() {<FILL_FUNCTION_BODY>}
} |
if (!methodReader.readOne()) {
return false;
}
messageConsumer.consume(tailer.lastReadIndex(), bytes.toString());
bytes.clear();
return true;
| 354 | 52 | 406 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/reader/queueentryreaders/VanillaQueueEntryReader.java | VanillaQueueEntryReader | read | class VanillaQueueEntryReader implements QueueEntryReader {
private final ExcerptTailer tailer;
private final Function<ExcerptTailer, DocumentContext> pollMethod;
private final QueueEntryHandler messageConverter;
private final MessageConsumer messageConsumer;
public VanillaQueueEntryReader(@NotNull ExcerptTailer tailer, @NotNull Function<ExcerptTailer, DocumentContext> pollMethod,
@NotNull QueueEntryHandler messageConverter, @NotNull MessageConsumer messageConsumer) {
this.tailer = tailer;
this.pollMethod = pollMethod;
this.messageConverter = messageConverter;
this.messageConsumer = messageConsumer;
}
@Override
public boolean read() {<FILL_FUNCTION_BODY>}
} |
try (DocumentContext dc = pollMethod.apply(tailer)) {
if (!dc.isPresent()) {
return false;
}
messageConverter.accept(dc.wire(), val -> messageConsumer.consume(dc.index(), val));
return true;
}
| 192 | 74 | 266 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/util/InternalFileUtil.java | ProcFdWalker | visitFile | class ProcFdWalker extends SimpleFileVisitor<Path> {
private final Set<String> openFiles = new HashSet<>();
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {<FILL_FUNCTION_BODY>}
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) {
// we definitely won't be able to access all the files, and it's common for a file to go missing mid-traversal
// so don't log when one of those things happens
if (!((exc instanceof AccessDeniedException) || (exc instanceof NoSuchFileException))) {
Jvm.warn().on(ProcFdWalker.class, "Error visiting file", exc);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
if (!dir.toAbsolutePath().toString().matches("/proc(/\\d+(/fd)?)?")) {
return FileVisitResult.SKIP_SUBTREE;
}
return FileVisitResult.CONTINUE;
}
} |
if (file.toAbsolutePath().toString().matches("/proc/\\d+/fd/\\d+")) {
try {
final String e = file.toRealPath().toAbsolutePath().toString();
openFiles.add(e);
} catch (NoSuchFileException | AccessDeniedException e) {
// Ignore, sometimes they disappear & we can't access all the files
} catch (IOException e) {
Jvm.warn().on(ProcFdWalker.class, "Error resolving " + file, e);
}
}
return FileVisitResult.CONTINUE;
| 289 | 153 | 442 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/writer/ChronicleWriter.java | ChronicleWriter | asMethodWriter | class ChronicleWriter {
private Path basePath;
private String methodName;
private List<String> files;
private Class<?> writeTo;
public void execute() throws IOException {
try (final ChronicleQueue queue = ChronicleQueue.singleBuilder(this.basePath).build();
final ExcerptAppender appender = queue.createAppender()) {
for (final String file : files) {
final Object payload = Marshallable.fromFile(Object.class, file);
try (final DocumentContext dc = appender.writingDocument()) {
if (writeTo != null)
dc.wire().write(methodName).marshallable((WriteMarshallable) payload);
else
dc.wire().write(methodName).object(payload);
}
}
}
}
/**
* Chronicle queue base path
* @param path path of queue to write to
* @return this
*/
public ChronicleWriter withBasePath(final Path path) {
this.basePath = path;
return this;
}
/**
* Interface class to use to write via
* @param interfaceName interface
* @return this
*/
public ChronicleWriter asMethodWriter(String interfaceName) {<FILL_FUNCTION_BODY>}
/**
* Specify method name to write each message out as
* @param methodName method name
* @return this
*/
public ChronicleWriter withMethodName(String methodName) {
this.methodName = methodName;
return this;
}
/**
* List of files to read and, for each, write out a message preceded by {@link #methodName}
* @param files files
* @return this
*/
public ChronicleWriter withFiles(List<String> files) {
this.files = files;
return this;
}
} |
try {
this.writeTo = Class.forName(interfaceName);
} catch (ClassNotFoundException e) {
throw Jvm.rethrow(e);
}
return this;
| 474 | 52 | 526 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/internal/writer/ChronicleWriterMain.java | ChronicleWriterMain | printHelpAndExit | class ChronicleWriterMain {
public void run(@NotNull String[] args) throws Exception {
final Options options = options();
final CommandLine commandLine = parseCommandLine(args, options);
final ChronicleWriter writer = new ChronicleWriter();
configure(writer, commandLine);
writer.execute();
}
private CommandLine parseCommandLine(final @NotNull String[] args, final Options options) {
final CommandLineParser parser = new DefaultParser();
CommandLine commandLine = null;
try {
commandLine = parser.parse(options, args);
if (commandLine.hasOption('h')) {
printHelpAndExit(options, 0, null);
}
if (commandLine.getArgList().isEmpty()) {
printHelpAndExit(options, 1, "Need files...");
}
} catch (ParseException e) {
printHelpAndExit(options, 1, e.getMessage());
}
return commandLine;
}
private void printHelpAndExit(final Options options, int status, String message) {<FILL_FUNCTION_BODY>}
private void configure(final ChronicleWriter writer, final CommandLine commandLine) {
writer.withBasePath(Paths.get(commandLine.getOptionValue('d')));
writer.withMethodName(commandLine.getOptionValue('m'));
if (commandLine.hasOption('i')) {
final String r = commandLine.getOptionValue('i');
writer.asMethodWriter(r.equals("null") ? null : r);
}
writer.withFiles(commandLine.getArgList());
}
@NotNull
private Options options() {
final Options options = new Options();
addOption(options, "m", "method", true, "Method name", true);
addOption(options, "d", "directory", true, "Directory containing chronicle queue to write to", true);
addOption(options, "i", "interface", true, "Interface to write via", false);
return options;
}
} |
final PrintWriter writer = new PrintWriter(System.out);
new HelpFormatter().printHelp(
writer,
180,
this.getClass().getSimpleName() + " files..",
message,
options,
HelpFormatter.DEFAULT_LEFT_PAD,
HelpFormatter.DEFAULT_DESC_PAD,
null,
true
);
writer.flush();
System.exit(status);
| 510 | 114 | 624 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/rollcycles/RollCycleArithmetic.java | RollCycleArithmetic | toIndex | class RollCycleArithmetic {
/**
* Sunday 1970 Jan 4th 00:00:00 UTC
*/
public static final int SUNDAY_00_00 = 259_200_000;
private final int cycleShift;
private final int indexCount;
private final int indexSpacing;
private final long sequenceMask;
public static RollCycleArithmetic of(int indexCount, int indexSpacing) {
return new RollCycleArithmetic(indexCount, indexSpacing);
}
private RollCycleArithmetic(int indexCount, int indexSpacing) {
this.indexCount = Maths.nextPower2(indexCount, 8);
assert this.indexCount <= MAX_INDEX_COUNT : "indexCount: " + indexCount;
this.indexSpacing = Maths.nextPower2(indexSpacing, 1);
cycleShift = Math.max(32, Maths.intLog2(indexCount) * 2 + Maths.intLog2(indexSpacing));
assert cycleShift < Long.SIZE : "cycleShift: " + cycleShift;
sequenceMask = (1L << cycleShift) - 1;
}
public long maxMessagesPerCycle() {
return Math.min(sequenceMask, ((long) indexCount * indexCount * indexSpacing));
}
public long toIndex(int cycle, long sequenceNumber) {<FILL_FUNCTION_BODY>}
public long toSequenceNumber(long index) {
return index & sequenceMask;
}
public int toCycle(long index) {
return Maths.toUInt31(index >> cycleShift);
}
public int indexSpacing() {
return indexSpacing;
}
public int indexCount() {
return indexCount;
}
} |
return ((long) cycle << cycleShift) + (sequenceNumber & sequenceMask);
| 463 | 22 | 485 | <no_super_class> |
OpenHFT_Chronicle-Queue | Chronicle-Queue/src/main/java/net/openhft/chronicle/queue/util/ToolsUtil.java | ToolsUtil | warnIfResourceTracing | class ToolsUtil {
private ToolsUtil() {
}
/**
* When running tools e.g. ChronicleReader, from the CQ source dir, resource tracing may be turned on
*/
public static void warnIfResourceTracing() {<FILL_FUNCTION_BODY>}
} |
// System.err (*not* logger as slf4j may not be set up e.g. when running queue_reader.sh)
if (Jvm.isResourceTracing())
System.err.println("Resource tracing is turned on - this will eventually die with OOME");
| 77 | 70 | 147 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/ActionEnter.java | ActionEnter | getStartIndex | class ActionEnter {
private HttpServletRequest request = null;
private String rootPath = null;
private String contextPath = null;
private String actionType = null;
private ConfigManager configManager = null;
public ActionEnter ( HttpServletRequest request, String rootPath, String userId) {
this.request = request;
this.rootPath = rootPath;
this.actionType = request.getParameter( "action" );
this.contextPath = request.getContextPath();
this.configManager = ConfigManager.getInstance( this.rootPath, this.contextPath, request.getRequestURI(), userId );
}
public String exec () {
String callbackName = this.request.getParameter("callback");
if ( callbackName != null ) {
if ( !validCallbackName( callbackName ) ) {
return new BaseState( false, AppInfo.ILLEGAL ).toJSONString();
}
String ww = callbackName+"("+this.invoke()+");";
return ww;
} else {
return this.invoke();
}
}
public String invoke() {
if ( actionType == null || !ActionMap.mapping.containsKey( actionType ) ) {
return new BaseState( false, AppInfo.INVALID_ACTION ).toJSONString();
}
if ( this.configManager == null || !this.configManager.valid() ) {
return new BaseState( false, AppInfo.CONFIG_ERROR ).toJSONString();
}
State state = null;
int actionCode = ActionMap.getType( this.actionType );
Map<String, Object> conf = null;
switch ( actionCode ) {
case ActionMap.CONFIG:
String config = this.configManager.getAllConfig().toString();
return config;
case ActionMap.UPLOAD_IMAGE:
case ActionMap.UPLOAD_SCRAWL:
case ActionMap.UPLOAD_VIDEO:
case ActionMap.UPLOAD_FILE:
conf = this.configManager.getConfig( actionCode );
state = new Uploader( request, conf ).doExec();
break;
case ActionMap.CATCH_IMAGE:
conf = configManager.getConfig( actionCode );
String[] list = this.request.getParameterValues( (String)conf.get( "fieldName" ) );
state = new ImageHunter( conf ).capture( list );
break;
case ActionMap.LIST_IMAGE:
case ActionMap.LIST_FILE:
conf = configManager.getConfig( actionCode );
int start = this.getStartIndex();
state = new FileManager( conf ).listFile( start );
break;
}
System.out.println("upload state:"+state.toJSONString());
return state.toJSONString();
}
public int getStartIndex () {<FILL_FUNCTION_BODY>}
/**
* callback参数验证
*/
public boolean validCallbackName ( String name ) {
if ( name.matches( "^[a-zA-Z_]+[\\w0-9_]*$" ) ) {
return true;
}
return false;
}
} |
String start = this.request.getParameter( "start" );
try {
return Integer.parseInt( start );
} catch ( Exception e ) {
return 0;
}
| 816 | 55 | 871 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/Encoder.java | Encoder | toUnicode | class Encoder {
public static String toUnicode ( String input ) {<FILL_FUNCTION_BODY>}
} |
StringBuilder builder = new StringBuilder();
char[] chars = input.toCharArray();
for ( char ch : chars ) {
if ( ch < 256 ) {
builder.append( ch );
} else {
builder.append( "\\u" + Integer.toHexString( ch& 0xffff ) );
}
}
return builder.toString();
| 34 | 115 | 149 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/PathFormat.java | PathFormat | parse | class PathFormat {
private static final String TIME = "time";
private static final String FULL_YEAR = "yyyy";
private static final String YEAR = "yy";
private static final String MONTH = "mm";
private static final String DAY = "dd";
private static final String HOUR = "hh";
private static final String MINUTE = "ii";
private static final String SECOND = "ss";
private static final String RAND = "rand";
private static Date currentDate = null;
public static String parse ( String input ) {<FILL_FUNCTION_BODY>}
/**
* 格式化路径, 把windows路径替换成标准路径
* @param input 待格式化的路径
* @return 格式化后的路径
*/
public static String format ( String input ) {
return input.replace( "\\", "/" );
}
public static String parse ( String input, String filename ) {
Pattern pattern = Pattern.compile( "\\{([^\\}]+)\\}", Pattern.CASE_INSENSITIVE );
Matcher matcher = pattern.matcher(input);
String matchStr = null;
PathFormat.currentDate = new Date();
StringBuffer sb = new StringBuffer();
while ( matcher.find() ) {
matchStr = matcher.group( 1 );
if ( matchStr.indexOf( "filename" ) != -1 ) {
filename = filename.replace( "$", "\\$" ).replaceAll( "[\\/:*?\"<>|]", "" );
matcher.appendReplacement(sb, filename );
} else {
matcher.appendReplacement(sb, PathFormat.getString( matchStr ) );
}
}
matcher.appendTail(sb);
return sb.toString();
}
private static String getString ( String pattern ) {
pattern = pattern.toLowerCase();
// time 处理
if ( pattern.indexOf( PathFormat.TIME ) != -1 ) {
return PathFormat.getTimestamp();
} else if ( pattern.indexOf( PathFormat.FULL_YEAR ) != -1 ) {
return PathFormat.getFullYear();
} else if ( pattern.indexOf( PathFormat.YEAR ) != -1 ) {
return PathFormat.getYear();
} else if ( pattern.indexOf( PathFormat.MONTH ) != -1 ) {
return PathFormat.getMonth();
} else if ( pattern.indexOf( PathFormat.DAY ) != -1 ) {
return PathFormat.getDay();
} else if ( pattern.indexOf( PathFormat.HOUR ) != -1 ) {
return PathFormat.getHour();
} else if ( pattern.indexOf( PathFormat.MINUTE ) != -1 ) {
return PathFormat.getMinute();
} else if ( pattern.indexOf( PathFormat.SECOND ) != -1 ) {
return PathFormat.getSecond();
} else if ( pattern.indexOf( PathFormat.RAND ) != -1 ) {
return PathFormat.getRandom( pattern );
}
return pattern;
}
private static String getTimestamp () {
return System.currentTimeMillis() + "";
}
private static String getFullYear () {
return new SimpleDateFormat( "yyyy" ).format( PathFormat.currentDate );
}
private static String getYear () {
return new SimpleDateFormat( "yy" ).format( PathFormat.currentDate );
}
private static String getMonth () {
return new SimpleDateFormat( "MM" ).format( PathFormat.currentDate );
}
private static String getDay () {
return new SimpleDateFormat( "dd" ).format( PathFormat.currentDate );
}
private static String getHour () {
return new SimpleDateFormat( "HH" ).format( PathFormat.currentDate );
}
private static String getMinute () {
return new SimpleDateFormat( "mm" ).format( PathFormat.currentDate );
}
private static String getSecond () {
return new SimpleDateFormat( "ss" ).format( PathFormat.currentDate );
}
private static String getRandom ( String pattern ) {
int length = 0;
pattern = pattern.split( ":" )[ 1 ].trim();
length = Integer.parseInt( pattern );
return ( Math.random() + "" ).replace( ".", "" ).substring( 0, length );
}
public static void main(String[] args) {
// TODO Auto-generated method stub
}
} |
Pattern pattern = Pattern.compile( "\\{([^\\}]+)\\}", Pattern.CASE_INSENSITIVE );
Matcher matcher = pattern.matcher(input);
PathFormat.currentDate = new Date();
StringBuffer sb = new StringBuffer();
while ( matcher.find() ) {
matcher.appendReplacement(sb, PathFormat.getString( matcher.group( 1 ) ) );
}
matcher.appendTail(sb);
return sb.toString();
| 1,184 | 151 | 1,335 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/define/BaseState.java | BaseState | toString | class BaseState implements State {
private boolean state = false;
private String info = null;
private Map<String, String> infoMap = new HashMap<String, String>();
public BaseState () {
this.state = true;
}
public BaseState ( boolean state ) {
this.setState( state );
}
public BaseState ( boolean state, String info ) {
this.setState( state );
this.info = info;
}
public BaseState ( boolean state, int infoCode ) {
this.setState( state );
this.info = AppInfo.getStateInfo( infoCode );
}
public BaseState ( boolean state, int infoCode , File tempFile) {
this.setState( state );
this.info = AppInfo.getStateInfo( infoCode );
this.tmpFile = tempFile;
}
public boolean isSuccess () {
return this.state;
}
public void setState ( boolean state ) {
this.state = state;
}
public void setInfo ( String info ) {
this.info = info;
}
public void setInfo ( int infoCode ) {
this.info = AppInfo.getStateInfo( infoCode );
}
@Override
public String toJSONString() {
return this.toString();
}
public String toString () {<FILL_FUNCTION_BODY>}
@Override
public void putInfo(String name, String val) {
this.infoMap.put(name, val);
}
@Override
public void putInfo(String name, long val) {
this.putInfo(name, val+"");
}
private File tmpFile;
public File getTmpFile() {
return tmpFile;
}
public void setTmpFile(File tmpFile) {
this.tmpFile = tmpFile;
}
} |
String key = null;
String stateVal = this.isSuccess() ? AppInfo.getStateInfo( AppInfo.SUCCESS ) : this.info;
StringBuilder builder = new StringBuilder();
builder.append( "{\"state\": \"" + stateVal + "\"" );
Iterator<String> iterator = this.infoMap.keySet().iterator();
while ( iterator.hasNext() ) {
key = iterator.next();
builder.append( ",\"" + key + "\": \"" + this.infoMap.get(key) + "\"" );
}
builder.append( "}" );
return Encoder.toUnicode( builder.toString() );
| 457 | 186 | 643 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/define/MultiState.java | MultiState | toJSONString | class MultiState implements State {
private boolean state = false;
private String info = null;
private Map<String, Long> intMap = new HashMap<String, Long>();
private Map<String, String> infoMap = new HashMap<String, String>();
private List<String> stateList = new ArrayList<String>();
public MultiState ( boolean state ) {
this.state = state;
}
public MultiState ( boolean state, String info ) {
this.state = state;
this.info = info;
}
public MultiState ( boolean state, int infoKey ) {
this.state = state;
this.info = AppInfo.getStateInfo( infoKey );
}
@Override
public boolean isSuccess() {
return this.state;
}
public void addState ( State state ) {
stateList.add( state.toJSONString() );
}
/**
* 该方法调用无效果
*/
@Override
public void putInfo(String name, String val) {
this.infoMap.put(name, val);
}
@Override
public String toJSONString() {<FILL_FUNCTION_BODY>}
@Override
public void putInfo(String name, long val) {
this.intMap.put( name, val );
}
} |
String stateVal = this.isSuccess() ? AppInfo.getStateInfo( AppInfo.SUCCESS ) : this.info;
StringBuilder builder = new StringBuilder();
builder.append( "{\"state\": \"" + stateVal + "\"" );
// 数字转换
Iterator<String> iterator = this.intMap.keySet().iterator();
while ( iterator.hasNext() ) {
stateVal = iterator.next();
builder.append( ",\""+ stateVal +"\": " + this.intMap.get( stateVal ) );
}
iterator = this.infoMap.keySet().iterator();
while ( iterator.hasNext() ) {
stateVal = iterator.next();
builder.append( ",\""+ stateVal +"\": \"" + this.infoMap.get( stateVal ) + "\"" );
}
builder.append( ", list: [" );
iterator = this.stateList.iterator();
while ( iterator.hasNext() ) {
builder.append( iterator.next() + "," );
}
if ( this.stateList.size() > 0 ) {
builder.deleteCharAt( builder.length() - 1 );
}
builder.append( " ]}" );
return Encoder.toUnicode( builder.toString() );
| 327 | 387 | 714 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/hunter/FileManager.java | FileManager | getAllowFiles | class FileManager {
private String dir = null;
private String rootPath = null;
private String[] allowFiles = null;
private int count = 0;
public FileManager ( Map<String, Object> conf ) {
this.rootPath = (String)conf.get( "rootPath" );
this.dir = this.rootPath + (String)conf.get( "dir" );
this.allowFiles = this.getAllowFiles( conf.get("allowFiles") );
this.count = (Integer)conf.get( "count" );
}
public State listFile ( int index ) {
File dir = new File( this.dir );
State state = null;
if ( !dir.exists() ) {
return new BaseState( false, AppInfo.NOT_EXIST );
}
if ( !dir.isDirectory() ) {
return new BaseState( false, AppInfo.NOT_DIRECTORY );
}
Collection<File> list = FileUtils.listFiles( dir, this.allowFiles, true );
if ( index < 0 || index > list.size() ) {
state = new MultiState( true );
} else {
Object[] fileList = Arrays.copyOfRange( list.toArray(), index, index + this.count );
state = this.getState( fileList );
}
state.putInfo( "start", index );
state.putInfo( "total", list.size() );
return state;
}
private State getState ( Object[] files ) {
MultiState state = new MultiState( true );
BaseState fileState = null;
File file = null;
for ( Object obj : files ) {
if ( obj == null ) {
break;
}
file = (File)obj;
fileState = new BaseState( true );
fileState.putInfo( "url", PathFormat.format( this.getPath( file ) ) );
state.addState( fileState );
}
return state;
}
private String getPath ( File file ) {
String path = file.getAbsolutePath();
return path.replace( this.rootPath, "/" );
}
private String[] getAllowFiles ( Object fileExt ) {<FILL_FUNCTION_BODY>}
} |
String[] exts = null;
String ext = null;
if ( fileExt == null ) {
return new String[ 0 ];
}
exts = (String[])fileExt;
for ( int i = 0, len = exts.length; i < len; i++ ) {
ext = exts[ i ];
exts[ i ] = ext.replace( ".", "" );
}
return exts;
| 612 | 130 | 742 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/hunter/ImageHunter.java | ImageHunter | captureRemoteData | class ImageHunter {
private String filename = null;
private String savePath = null;
private String rootPath = null;
private List<String> allowTypes = null;
private long maxSize = -1;
private List<String> filters = null;
public ImageHunter ( Map<String, Object> conf ) {
this.filename = (String)conf.get( "filename" );
this.savePath = (String)conf.get( "savePath" );
this.rootPath = (String)conf.get( "rootPath" );
this.maxSize = (Long)conf.get( "maxSize" );
this.allowTypes = Arrays.asList( (String[])conf.get( "allowFiles" ) );
this.filters = Arrays.asList( (String[])conf.get( "filter" ) );
}
public State capture ( String[] list ) {
MultiState state = new MultiState( true );
for ( String source : list ) {
state.addState( captureRemoteData( source ) );
}
return state;
}
public State captureRemoteData ( String urlStr ) {<FILL_FUNCTION_BODY>}
private String getPath ( String savePath, String filename, String suffix ) {
return PathFormat.parse( savePath + suffix, filename );
}
private boolean validHost ( String hostname ) {
try {
InetAddress ip = InetAddress.getByName(hostname);
if (ip.isSiteLocalAddress()) {
return false;
}
} catch (UnknownHostException e) {
return false;
}
return !filters.contains( hostname );
}
private boolean validContentState ( int code ) {
return HttpURLConnection.HTTP_OK == code;
}
private boolean validFileType ( String type ) {
return this.allowTypes.contains( type );
}
private boolean validFileSize ( int size ) {
return size < this.maxSize;
}
} |
HttpURLConnection connection = null;
URL url = null;
String suffix = null;
try {
url = new URL( urlStr );
if ( !validHost( url.getHost() ) ) {
return new BaseState( false, AppInfo.PREVENT_HOST );
}
connection = (HttpURLConnection) url.openConnection();
connection.setInstanceFollowRedirects( true );
connection.setUseCaches( true );
if ( !validContentState( connection.getResponseCode() ) ) {
return new BaseState( false, AppInfo.CONNECTION_ERROR );
}
suffix = MIMEType.getSuffix( connection.getContentType() );
if ( !validFileType( suffix ) ) {
return new BaseState( false, AppInfo.NOT_ALLOW_FILE_TYPE );
}
if ( !validFileSize( connection.getContentLength() ) ) {
return new BaseState( false, AppInfo.MAX_SIZE );
}
String savePath = this.getPath( this.savePath, this.filename, suffix );
String physicalPath = this.rootPath + savePath;
State state = StorageManager.saveFileByInputStream( connection.getInputStream(), physicalPath );
if ( state.isSuccess() ) {
state.putInfo( "url", PathFormat.format( savePath ) );
state.putInfo( "source", urlStr );
}
return state;
} catch ( Exception e ) {
return new BaseState( false, AppInfo.REMOTE_FAIL );
}
| 534 | 428 | 962 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/upload/Base64Uploader.java | Base64Uploader | save | class Base64Uploader {
public static State save(String content, Map<String, Object> conf) {<FILL_FUNCTION_BODY>}
private static byte[] decode(String content) {
return Base64.decodeBase64(content);
}
private static boolean validSize(byte[] data, long length) {
return data.length <= length;
}
} |
byte[] data = decode(content);
long maxSize = ((Long) conf.get("maxSize")).longValue();
if (!validSize(data, maxSize)) {
return new BaseState(false, AppInfo.MAX_SIZE);
}
String suffix = FileType.getSuffix("JPG");
String savePath = PathFormat.parse((String) conf.get("savePath"),
(String) conf.get("filename"));
savePath = savePath + suffix;
String physicalPath = (String) conf.get("rootPath") + savePath;
State storageState = StorageManager.saveBinaryFile(data, physicalPath);
if (storageState.isSuccess()) {
storageState.putInfo("url", PathFormat.format(savePath));
storageState.putInfo("type", suffix);
storageState.putInfo("original", "");
}
return storageState;
| 99 | 241 | 340 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/upload/BinaryUploader.java | BinaryUploader | save | class BinaryUploader {
public static final State save(HttpServletRequest request,
Map<String, Object> conf) {<FILL_FUNCTION_BODY>}
private static boolean validType(String type, String[] allowTypes) {
List<String> list = Arrays.asList(allowTypes);
return list.contains(type);
}
} |
FileItemStream fileStream = null;
boolean isAjaxUpload = request.getHeader( "X_Requested_With" ) != null;
if (!ServletFileUpload.isMultipartContent(request)) {
return new BaseState(false, AppInfo.NOT_MULTIPART_CONTENT);
}
ServletFileUpload upload = new ServletFileUpload(
new DiskFileItemFactory());
if ( isAjaxUpload ) {
upload.setHeaderEncoding( "UTF-8" );
}
try {
FileItemIterator iterator = upload.getItemIterator(request);
while (iterator.hasNext()) {
fileStream = iterator.next();
if (!fileStream.isFormField())
break;
fileStream = null;
}
if (fileStream == null) {
return new BaseState(false, AppInfo.NOTFOUND_UPLOAD_DATA);
}
String savePath = (String) conf.get("savePath");
String originFileName = fileStream.getName();
String suffix = FileType.getSuffixByFilename(originFileName);
originFileName = originFileName.substring(0,
originFileName.length() - suffix.length());
savePath = savePath + suffix;
long maxSize = ((Long) conf.get("maxSize")).longValue();
if (!validType(suffix, (String[]) conf.get("allowFiles"))) {
return new BaseState(false, AppInfo.NOT_ALLOW_FILE_TYPE);
}
savePath = PathFormat.parse(savePath, originFileName);
String physicalPath = (String) conf.get("rootPath") + savePath;
InputStream is = fileStream.openStream();
State storageState = StorageManager.saveFileByInputStream(is,
physicalPath, maxSize);
is.close();
if (storageState.isSuccess()) {
storageState.putInfo("url", PathFormat.format(savePath));
storageState.putInfo("type", suffix);
storageState.putInfo("original", originFileName + suffix);
}
return storageState;
} catch (FileUploadException e) {
return new BaseState(false, AppInfo.PARSE_REQUEST_ERROR);
} catch (IOException e) {
}
return new BaseState(false, AppInfo.IO_ERROR);
| 93 | 612 | 705 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/upload/StorageManager.java | StorageManager | saveTmpFile | class StorageManager {
public static final int BUFFER_SIZE = 8192;
public StorageManager() {
}
public static State saveBinaryFile(byte[] data, String path) {
if(!FileMagicUtils.isUserUpFileType(data,path.substring(path.lastIndexOf(".")))){
return new BaseState(false, AppInfo.NOT_ALLOW_FILE_TYPE);
}
File file = new File(path);
State state = valid(file);
if (!state.isSuccess()) {
return state;
}
try {
BufferedOutputStream bos = new BufferedOutputStream(
new FileOutputStream(file));
bos.write(data);
bos.flush();
bos.close();
} catch (IOException ioe) {
ioe.printStackTrace();
return new BaseState(false, AppInfo.IO_ERROR);
}
state = new BaseState(true, file.getAbsolutePath());
state.putInfo( "size", data.length );
state.putInfo( "title", file.getName() );
return state;
}
public static State saveFileByInputStream(InputStream is, String path,
long maxSize) {
BaseState validateState = isUserUpFileType(is,path.substring(path.lastIndexOf(".")));
if(!validateState.isSuccess()) return validateState;
State state = new BaseState(false, AppInfo.IO_ERROR);
File tmpFile = validateState.getTmpFile();
if(tmpFile!=null){
state = saveTmpFile(tmpFile, path);
deleteTmpFile(tmpFile);
return state;
}
return state;
}
public static State saveFileByInputStream(InputStream is, String path) {
BaseState validateState = isUserUpFileType(is,path.substring(path.lastIndexOf(".")));
if(!validateState.isSuccess()) return validateState;
State state = new BaseState(false, AppInfo.IO_ERROR);
File tmpFile = validateState.getTmpFile();
if(tmpFile!=null){
state = saveTmpFile(tmpFile, path);
deleteTmpFile(tmpFile);
return state;
}
return state;
}
private static File getTmpFile() {
File tmpDir = FileUtils.getTempDirectory();
String tmpFileName = (Math.random() * 10000 + "").replace(".", "");
return new File(tmpDir, tmpFileName);
}
private static State saveTmpFile(File tmpFile, String path) {<FILL_FUNCTION_BODY>}
private static State valid(File file) {
File parentPath = file.getParentFile();
if ((!parentPath.exists()) && (!parentPath.mkdirs())) {
return new BaseState(false, AppInfo.FAILED_CREATE_FILE);
}
if (!parentPath.canWrite()) {
return new BaseState(false, AppInfo.PERMISSION_DENIED);
}
return new BaseState(true);
}
public static BaseState isUserUpFileType(InputStream is,String fileSuffix) {
File tmpFile = getTmpFile();
byte[] dataBuf = new byte[ 2048 ];
BufferedInputStream bis = new BufferedInputStream(is, StorageManager.BUFFER_SIZE);
try {
BufferedOutputStream bos = new BufferedOutputStream(
new FileOutputStream(tmpFile), StorageManager.BUFFER_SIZE);
int count = 0;
while ((count = bis.read(dataBuf)) != -1) {
bos.write(dataBuf, 0, count);
}
bis.close();
bos.flush();
bos.close();
if(!FileMagicUtils.isUserUpFileType(tmpFile,fileSuffix)){
tmpFile.delete();
return new BaseState(false, AppInfo.NOT_ALLOW_FILE_TYPE);
}
// tmpFile.delete();
} catch (IOException e) {
e.printStackTrace();
return new BaseState(false, AppInfo.IO_ERROR);
}
return new BaseState(true, AppInfo.SUCCESS, tmpFile);
}
private static void deleteTmpFile(File tmpFile) {
try{
tmpFile.delete();
}catch (Exception e){
e.printStackTrace();
}
}
} |
State state = null;
File targetFile = new File(path);
if (targetFile.canWrite()) {
return new BaseState(false, AppInfo.PERMISSION_DENIED);
}
try {
FileUtils.moveFile(tmpFile, targetFile);
} catch (IOException e) {
e.printStackTrace();
return new BaseState(false, AppInfo.IO_ERROR);
}
state = new BaseState(true);
state.putInfo( "size", targetFile.length() );
state.putInfo( "title", targetFile.getName() );
return state;
| 1,155 | 168 | 1,323 | <no_super_class> |
wkeyuan_DWSurvey | DWSurvey/src/main/java/com/baidu/ueditor/upload/Uploader.java | Uploader | doExec | class Uploader {
private HttpServletRequest request = null;
private Map<String, Object> conf = null;
public Uploader(HttpServletRequest request, Map<String, Object> conf) {
this.request = request;
this.conf = conf;
}
public final State doExec() {<FILL_FUNCTION_BODY>}
} |
String filedName = (String) this.conf.get("fieldName");
State state = null;
if ("true".equals(this.conf.get("isBase64"))) {
state = Base64Uploader.save(this.request.getParameter(filedName),
this.conf);
} else {
state = BinaryUploader.save(this.request, this.conf);
}
return state;
| 89 | 113 | 202 | <no_super_class> |