proj_name | relative_path | class_name | func_name | masked_class | func_body |
---|---|---|---|---|---|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/UniformLongGenerator.java | UniformLongGenerator | nextValue | class UniformLongGenerator extends NumberGenerator {
private final long lb, ub, interval;
/**
* Creates a generator that will return longs uniformly randomly from the
* interval [lb,ub] inclusive (that is, lb and ub are possible values).
*
* @param lb the lower bound (inclusive) of generated values
* @param ub the upper bound (inclusive) of generated values
*/
public UniformLongGenerator(long lb, long ub) {
this.lb = lb;
this.ub = ub;
interval = this.ub - this.lb + 1;
}
@Override
public Long nextValue() {<FILL_FUNCTION_BODY>}
@Override
public double mean() {
return (lb + ub) / 2.0;
}
} |
// nextLong(bound) draws uniformly from [0, bound), avoiding the
// Math.abs(Long.MIN_VALUE) edge case and the bias of abs-then-mod
long ret = ThreadLocalRandom.current().nextLong(interval) + lb;
setLastValue(ret);
return ret;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/UniformLongGenerator.java | UniformLongGenerator | mean | class UniformLongGenerator extends NumberGenerator {
private final long lb, ub, interval;
/**
* Creates a generator that will return longs uniformly randomly from the
* interval [lb,ub] inclusive (that is, lb and ub are possible values).
*
* @param lb the lower bound (inclusive) of generated values
* @param ub the upper bound (inclusive) of generated values
*/
public UniformLongGenerator(long lb, long ub) {
this.lb = lb;
this.ub = ub;
interval = this.ub - this.lb + 1;
}
@Override
public Long nextValue() {
// nextLong(bound) draws uniformly from [0, bound), avoiding the
// Math.abs(Long.MIN_VALUE) edge case and the bias of abs-then-mod
long ret = ThreadLocalRandom.current().nextLong(interval) + lb;
setLastValue(ret);
return ret;
}
@Override
public double mean() {<FILL_FUNCTION_BODY>}
} |
return (lb + ub) / 2.0;
|
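A minimal usage sketch for the class above, assuming the YCSB core classes shown in these rows are on the classpath; the demo class name is hypothetical, everything else follows the code in this row:

```java
import site.ycsb.generator.UniformLongGenerator;

public class UniformLongGeneratorDemo {
  public static void main(String[] args) {
    // Draws longs uniformly from the inclusive interval [10, 19].
    UniformLongGenerator gen = new UniformLongGenerator(10, 19);
    for (int i = 0; i < 5; i++) {
      System.out.println(gen.nextValue()); // each value is in [10, 19]
    }
    // mean() is the midpoint of the interval: (10 + 19) / 2.0 = 14.5
    System.out.println("mean = " + gen.mean());
  }
}
```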
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/UnixEpochTimestampGenerator.java | UnixEpochTimestampGenerator | initalizeTimestamp | class UnixEpochTimestampGenerator extends Generator<Long> {
/** The base timestamp used as a starting reference. */
protected long startTimestamp;
/** The current timestamp that will be incremented. */
protected long currentTimestamp;
/** The last used timestamp. Should always be one interval behind current. */
protected long lastTimestamp;
/** The interval to increment by. Multiplied by {@link #timeUnits}. */
protected long interval;
/** The units of time the interval represents. */
protected TimeUnit timeUnits;
/**
* Default ctor with the current system time and a 60 second interval.
*/
public UnixEpochTimestampGenerator() {
this(60, TimeUnit.SECONDS);
}
/**
* Ctor that uses the current system time as current.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
initalizeTimestamp(-1);
currentTimestamp -= getOffset(1);
lastTimestamp = currentTimestamp;
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* or seconds and any interval above, {@code System.currentTimeMillis() / 1000}
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
currentTimestamp = startTimestamp - getOffset(1);
this.startTimestamp = currentTimestamp;
lastTimestamp = currentTimestamp - getOffset(1);
}
/**
* Sets the starting timestamp to the current system time plus the interval offset.
* E.g. with the default 60 second interval, supply a value of {@code -60} to set the time an hour in the past.
* @param intervalOffset The interval to increment or decrement by.
*/
public void initalizeTimestamp(final long intervalOffset) {<FILL_FUNCTION_BODY>}
@Override
public Long nextValue() {
lastTimestamp = currentTimestamp;
currentTimestamp += getOffset(1);
return currentTimestamp;
}
/**
* Returns the proper increment offset to use given the interval and timeunits.
* @param intervalOffset The amount of offset to multiply by.
* @return An offset value to adjust the timestamp by.
*/
public long getOffset(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
case MICROSECONDS:
case MILLISECONDS:
case SECONDS:
return intervalOffset * interval;
case MINUTES:
return intervalOffset * interval * (long) 60;
case HOURS:
return intervalOffset * interval * (long) (60 * 60);
case DAYS:
return intervalOffset * interval * (long) (60 * 60 * 24);
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
}
@Override
public Long lastValue() {
return lastTimestamp;
}
/** @return The current timestamp as set by the last call to {@link #nextValue()} */
public long currentValue() {
return currentTimestamp;
}
} |
switch (timeUnits) {
case NANOSECONDS:
currentTimestamp = System.nanoTime() + getOffset(intervalOffset);
break;
case MICROSECONDS:
currentTimestamp = (System.nanoTime() / 1000) + getOffset(intervalOffset);
break;
case MILLISECONDS:
currentTimestamp = System.currentTimeMillis() + getOffset(intervalOffset);
break;
case SECONDS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case MINUTES:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case HOURS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case DAYS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
startTimestamp = currentTimestamp;
|
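The constructors above deliberately back `currentTimestamp` up by one interval so that the first `nextValue()` call returns the intended start. A small sketch of that behavior, under the same classpath assumption (the demo class name is hypothetical):

```java
import java.util.concurrent.TimeUnit;
import site.ycsb.generator.UnixEpochTimestampGenerator;

public class TimestampGeneratorDemo {
  public static void main(String[] args) {
    // Seconds since the epoch, matching TimeUnit.SECONDS below.
    long start = System.currentTimeMillis() / 1000;
    UnixEpochTimestampGenerator gen =
        new UnixEpochTimestampGenerator(60, TimeUnit.SECONDS, start);
    // The ctor subtracted one interval, so the first call returns start itself.
    System.out.println(gen.nextValue()); // start
    System.out.println(gen.nextValue()); // start + 60
    System.out.println(gen.lastValue()); // start, one interval behind current
  }
}
```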
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/UnixEpochTimestampGenerator.java | UnixEpochTimestampGenerator | nextValue | class UnixEpochTimestampGenerator extends Generator<Long> {
/** The base timestamp used as a starting reference. */
protected long startTimestamp;
/** The current timestamp that will be incremented. */
protected long currentTimestamp;
/** The last used timestamp. Should always be one interval behind current. */
protected long lastTimestamp;
/** The interval to increment by. Multiplied by {@link #timeUnits}. */
protected long interval;
/** The units of time the interval represents. */
protected TimeUnit timeUnits;
/**
* Default ctor with the current system time and a 60 second interval.
*/
public UnixEpochTimestampGenerator() {
this(60, TimeUnit.SECONDS);
}
/**
* Ctor that uses the current system time as current.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
initalizeTimestamp(-1);
currentTimestamp -= getOffset(1);
lastTimestamp = currentTimestamp;
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* or seconds and any interval above, {@code System.currentTimeMillis() / 1000}
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
currentTimestamp = startTimestamp - getOffset(1);
this.startTimestamp = currentTimestamp;
lastTimestamp = currentTimestamp - getOffset(1);
}
/**
* Sets the starting timestamp to the current system time plus the interval offset.
* E.g. with the default 60 second interval, supply a value of {@code -60} to set the time an hour in the past.
* @param intervalOffset The interval to increment or decrement by.
*/
public void initalizeTimestamp(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
currentTimestamp = System.nanoTime() + getOffset(intervalOffset);
break;
case MICROSECONDS:
currentTimestamp = (System.nanoTime() / 1000) + getOffset(intervalOffset);
break;
case MILLISECONDS:
currentTimestamp = System.currentTimeMillis() + getOffset(intervalOffset);
break;
case SECONDS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case MINUTES:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case HOURS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case DAYS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
startTimestamp = currentTimestamp;
}
@Override
public Long nextValue() {<FILL_FUNCTION_BODY>}
/**
* Returns the proper increment offset to use given the interval and timeunits.
* @param intervalOffset The amount of offset to multiply by.
* @return An offset value to adjust the timestamp by.
*/
public long getOffset(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
case MICROSECONDS:
case MILLISECONDS:
case SECONDS:
return intervalOffset * interval;
case MINUTES:
return intervalOffset * interval * (long) 60;
case HOURS:
return intervalOffset * interval * (long) (60 * 60);
case DAYS:
return intervalOffset * interval * (long) (60 * 60 * 24);
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
}
@Override
public Long lastValue() {
return lastTimestamp;
}
/** @return The current timestamp as set by the last call to {@link #nextValue()} */
public long currentValue() {
return currentTimestamp;
}
} |
lastTimestamp = currentTimestamp;
currentTimestamp += getOffset(1);
return currentTimestamp;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/UnixEpochTimestampGenerator.java | UnixEpochTimestampGenerator | getOffset | class UnixEpochTimestampGenerator extends Generator<Long> {
/** The base timestamp used as a starting reference. */
protected long startTimestamp;
/** The current timestamp that will be incremented. */
protected long currentTimestamp;
/** The last used timestamp. Should always be one interval behind current. */
protected long lastTimestamp;
/** The interval to increment by. Multiplied by {@link #timeUnits}. */
protected long interval;
/** The units of time the interval represents. */
protected TimeUnit timeUnits;
/**
* Default ctor with the current system time and a 60 second interval.
*/
public UnixEpochTimestampGenerator() {
this(60, TimeUnit.SECONDS);
}
/**
* Ctor that uses the current system time as current.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
initalizeTimestamp(-1);
currentTimestamp -= getOffset(1);
lastTimestamp = currentTimestamp;
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* or seconds and any interval above, {@code System.currentTimeMillis() / 1000}
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
currentTimestamp = startTimestamp - getOffset(1);
this.startTimestamp = currentTimestamp;
lastTimestamp = currentTimestamp - getOffset(1);
}
/**
* Sets the starting timestamp to the current system time plus the interval offset.
* E.g. with the default 60 second interval, supply a value of {@code -60} to set the time an hour in the past.
* @param intervalOffset The interval to increment or decrement by.
*/
public void initalizeTimestamp(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
currentTimestamp = System.nanoTime() + getOffset(intervalOffset);
break;
case MICROSECONDS:
currentTimestamp = (System.nanoTime() / 1000) + getOffset(intervalOffset);
break;
case MILLISECONDS:
currentTimestamp = System.currentTimeMillis() + getOffset(intervalOffset);
break;
case SECONDS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case MINUTES:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case HOURS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case DAYS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
startTimestamp = currentTimestamp;
}
@Override
public Long nextValue() {
lastTimestamp = currentTimestamp;
currentTimestamp += getOffset(1);
return currentTimestamp;
}
/**
* Returns the proper increment offset to use given the interval and timeunits.
* @param intervalOffset The amount of offset to multiply by.
* @return An offset value to adjust the timestamp by.
*/
public long getOffset(final long intervalOffset) {<FILL_FUNCTION_BODY>}
@Override
public Long lastValue() {
return lastTimestamp;
}
/** @return The current timestamp as set by the last call to {@link #nextValue()} */
public long currentValue() {
return currentTimestamp;
}
} |
switch (timeUnits) {
case NANOSECONDS:
case MICROSECONDS:
case MILLISECONDS:
case SECONDS:
return intervalOffset * interval;
case MINUTES:
return intervalOffset * interval * (long) 60;
case HOURS:
return intervalOffset * interval * (long) (60 * 60);
case DAYS:
return intervalOffset * interval * (long) (60 * 60 * 24);
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
|
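In `getOffset`, sub-minute units pass the interval through unchanged (the caller supplies timestamps in those units), while `MINUTES`, `HOURS`, and `DAYS` scale it into seconds. A quick sketch of that arithmetic, under the same classpath assumption (demo class name hypothetical):

```java
import java.util.concurrent.TimeUnit;
import site.ycsb.generator.UnixEpochTimestampGenerator;

public class OffsetDemo {
  public static void main(String[] args) {
    // A 5-minute interval is scaled into seconds: 5 * 60 = 300 per step.
    UnixEpochTimestampGenerator minutes =
        new UnixEpochTimestampGenerator(5, TimeUnit.MINUTES);
    System.out.println(minutes.getOffset(1));  // 300
    System.out.println(minutes.getOffset(-2)); // -600; negative offsets step backwards

    // A 1-day interval: 60 * 60 * 24 = 86400 seconds per step.
    UnixEpochTimestampGenerator days =
        new UnixEpochTimestampGenerator(1, TimeUnit.DAYS);
    System.out.println(days.getOffset(1));     // 86400
  }
}
```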
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/UnixEpochTimestampGenerator.java | UnixEpochTimestampGenerator | lastValue | class UnixEpochTimestampGenerator extends Generator<Long> {
/** The base timestamp used as a starting reference. */
protected long startTimestamp;
/** The current timestamp that will be incremented. */
protected long currentTimestamp;
/** The last used timestamp. Should always be one interval behind current. */
protected long lastTimestamp;
/** The interval to increment by. Multiplied by {@link #timeUnits}. */
protected long interval;
/** The units of time the interval represents. */
protected TimeUnit timeUnits;
/**
* Default ctor with the current system time and a 60 second interval.
*/
public UnixEpochTimestampGenerator() {
this(60, TimeUnit.SECONDS);
}
/**
* Ctor that uses the current system time as current.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
initalizeTimestamp(-1);
currentTimestamp -= getOffset(1);
lastTimestamp = currentTimestamp;
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* or seconds and any interval above, {@code System.currentTimeMillis() / 1000}
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
currentTimestamp = startTimestamp - getOffset(1);
this.startTimestamp = currentTimestamp;
lastTimestamp = currentTimestamp - getOffset(1);
}
/**
* Sets the starting timestamp to the current system time plus the interval offset.
* E.g. with the default 60 second interval, supply a value of {@code -60} to set the time an hour in the past.
* @param intervalOffset The interval to increment or decrement by.
*/
public void initalizeTimestamp(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
currentTimestamp = System.nanoTime() + getOffset(intervalOffset);
break;
case MICROSECONDS:
currentTimestamp = (System.nanoTime() / 1000) + getOffset(intervalOffset);
break;
case MILLISECONDS:
currentTimestamp = System.currentTimeMillis() + getOffset(intervalOffset);
break;
case SECONDS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case MINUTES:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case HOURS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case DAYS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
startTimestamp = currentTimestamp;
}
@Override
public Long nextValue() {
lastTimestamp = currentTimestamp;
currentTimestamp += getOffset(1);
return currentTimestamp;
}
/**
* Returns the proper increment offset to use given the interval and timeunits.
* @param intervalOffset The amount of offset to multiply by.
* @return An offset value to adjust the timestamp by.
*/
public long getOffset(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
case MICROSECONDS:
case MILLISECONDS:
case SECONDS:
return intervalOffset * interval;
case MINUTES:
return intervalOffset * interval * (long) 60;
case HOURS:
return intervalOffset * interval * (long) (60 * 60);
case DAYS:
return intervalOffset * interval * (long) (60 * 60 * 24);
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
}
@Override
public Long lastValue() {<FILL_FUNCTION_BODY>}
/** @return The current timestamp as set by the last call to {@link #nextValue()} */
public long currentValue() {
return currentTimestamp;
}
} |
return lastTimestamp;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/UnixEpochTimestampGenerator.java | UnixEpochTimestampGenerator | currentValue | class UnixEpochTimestampGenerator extends Generator<Long> {
/** The base timestamp used as a starting reference. */
protected long startTimestamp;
/** The current timestamp that will be incremented. */
protected long currentTimestamp;
/** The last used timestamp. Should always be one interval behind current. */
protected long lastTimestamp;
/** The interval to increment by. Multiplied by {@link #timeUnits}. */
protected long interval;
/** The units of time the interval represents. */
protected TimeUnit timeUnits;
/**
* Default ctor with the current system time and a 60 second interval.
*/
public UnixEpochTimestampGenerator() {
this(60, TimeUnit.SECONDS);
}
/**
* Ctor that uses the current system time as current.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
initalizeTimestamp(-1);
currentTimestamp -= getOffset(1);
lastTimestamp = currentTimestamp;
}
/**
* Ctor for supplying a starting timestamp.
* @param interval The interval for incrementing the timestamp.
* @param timeUnits The units of time the increment represents.
* @param startTimestamp The start timestamp to use.
* NOTE that this must match the time units used for the interval.
* If the units are in nanoseconds, provide a nanosecond timestamp {@code System.nanoTime()}
* or in microseconds, {@code System.nanoTime() / 1000}
* or in millis, {@code System.currentTimeMillis()}
* or seconds and any interval above, {@code System.currentTimeMillis() / 1000}
*/
public UnixEpochTimestampGenerator(final long interval, final TimeUnit timeUnits,
final long startTimestamp) {
this.interval = interval;
this.timeUnits = timeUnits;
// move the first timestamp by 1 interval so that the first call to nextValue
// returns this timestamp
currentTimestamp = startTimestamp - getOffset(1);
this.startTimestamp = currentTimestamp;
lastTimestamp = currentTimestamp - getOffset(1);
}
/**
* Sets the starting timestamp to the current system time plus the interval offset.
* E.g. with the default 60 second interval, supply a value of {@code -60} to set the time an hour in the past.
* @param intervalOffset The interval to increment or decrement by.
*/
public void initalizeTimestamp(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
currentTimestamp = System.nanoTime() + getOffset(intervalOffset);
break;
case MICROSECONDS:
currentTimestamp = (System.nanoTime() / 1000) + getOffset(intervalOffset);
break;
case MILLISECONDS:
currentTimestamp = System.currentTimeMillis() + getOffset(intervalOffset);
break;
case SECONDS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case MINUTES:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case HOURS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
case DAYS:
currentTimestamp = (System.currentTimeMillis() / 1000) +
getOffset(intervalOffset);
break;
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
startTimestamp = currentTimestamp;
}
@Override
public Long nextValue() {
lastTimestamp = currentTimestamp;
currentTimestamp += getOffset(1);
return currentTimestamp;
}
/**
* Returns the proper increment offset to use given the interval and timeunits.
* @param intervalOffset The amount of offset to multiply by.
* @return An offset value to adjust the timestamp by.
*/
public long getOffset(final long intervalOffset) {
switch (timeUnits) {
case NANOSECONDS:
case MICROSECONDS:
case MILLISECONDS:
case SECONDS:
return intervalOffset * interval;
case MINUTES:
return intervalOffset * interval * (long) 60;
case HOURS:
return intervalOffset * interval * (long) (60 * 60);
case DAYS:
return intervalOffset * interval * (long) (60 * 60 * 24);
default:
throw new IllegalArgumentException("Unhandled time unit type: " + timeUnits);
}
}
@Override
public Long lastValue() {
return lastTimestamp;
}
/** @return The current timestamp as set by the last call to {@link #nextValue()} */
public long currentValue() {<FILL_FUNCTION_BODY>}
} |
return currentTimestamp;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | zeta | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
did the second thread think there were only 1000 items? Maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {<FILL_FUNCTION_BODY>}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note: for large itemsets, this is very slow, so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribtion. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
* the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the min+1th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
} |
countforzeta = n;
return zetastatic(n, thetaVal);
|
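The constant computed by `zeta` is the generalized harmonic number over the `n` items; with zipfian constant $\theta$ (called `thetaVal` in the code):

$$\zeta(n, \theta) = \sum_{i=1}^{n} \frac{1}{i^{\theta}}$$

The instance method also records `n` in `countforzeta` before delegating to `zetastatic`, so a later `nextLong` call with a different item count can tell that the cached `zetan` is stale.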
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | zetastatic | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
did the second thread think there were only 1000 items? Maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {<FILL_FUNCTION_BODY>}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note: for large itemsets, this is very slow, so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribtion. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
* the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the min+1th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
} |
return zetastatic(0, n, theta, 0);
|
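For reference, `nextLong` above is the approximate inverse-CDF sampler from "Quickly Generating Billion-Record Synthetic Databases" (Gray et al., SIGMOD 1994). With $u \sim \mathrm{Uniform}[0,1)$, $n$ items, and the constants precomputed in the constructor:

$$\alpha = \frac{1}{1-\theta}, \qquad \eta = \frac{1-(2/n)^{1-\theta}}{1-\zeta(2,\theta)/\zeta(n,\theta)}, \qquad X = \mathrm{base} + \left\lfloor n\,(\eta u - \eta + 1)^{\alpha} \right\rfloor$$

with the two special cases returning $\mathrm{base}$ when $u\,\zeta(n,\theta) < 1$ and $\mathrm{base}+1$ when $u\,\zeta(n,\theta) < 1 + 0.5^{\theta}$.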
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | zeta | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
did the second thread think there were only 1000 items? Maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {<FILL_FUNCTION_BODY>}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note: for large itemsets, this is very slow, so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribtion. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
* the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the min+1th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
} |
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
|
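The incremental variant is justified by splitting the harmonic sum at the old item count $st$:

$$\zeta(n,\theta) = \underbrace{\sum_{i=1}^{st}\frac{1}{i^{\theta}}}_{\texttt{initialsum}} + \sum_{i=st+1}^{n}\frac{1}{i^{\theta}}$$

so growing the item count only costs the $n - st$ new terms instead of a full recomputation; shrinking it has no such shortcut, which is why the `allowitemcountdecrease` path recomputes from scratch.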
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | zetastatic | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
did the second thread think there were only 1000 items? Maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {<FILL_FUNCTION_BODY>}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note : for large itemsets, this is very slow. so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribtion. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
 * the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the (min+1)th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
} |
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
|
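The zetastatic body above computes the generalized harmonic number H(n, theta) = sum_{i=1}^{n} 1/i^theta, optionally continuing from a previous partial sum over the first st items. A minimal, self-contained sketch (illustrative class and method names, not YCSB API) showing that the incremental computation agrees with the from-scratch one:

// Sketch only: checks that extending a partial zeta sum matches recomputing it.
// ZetaCheck and its members are hypothetical names, not part of YCSB.
public final class ZetaCheck {
  static double zeta(long st, long n, double theta, double initialSum) {
    double sum = initialSum;
    for (long i = st; i < n; i++) {
      sum += 1 / Math.pow(i + 1, theta); // adds the term for item rank i + 1
    }
    return sum;
  }

  public static void main(String[] args) {
    double theta = 0.99;
    double h1000 = zeta(0, 1000, theta, 0);        // H(1000, theta) from scratch
    double h2000 = zeta(1000, 2000, theta, h1000); // extended incrementally
    double direct = zeta(0, 2000, theta, 0);       // H(2000, theta) from scratch
    System.out.printf("incremental=%.12f direct=%.12f%n", h2000, direct);
  }
}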
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | nextLong | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
* did the second thread think there were only 1000 items? maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {<FILL_FUNCTION_BODY>}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
 * the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the (min+1)th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
} |
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note: for large item sets, this is very slow, so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribution. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
|
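The nextLong body above implements the closed-form Zipfian sampler from the Gray et al. paper cited in its first comment: a uniform draw u is first compared against the probability mass of the two most popular items (u * zetan < 1 selects rank 0, since rank 1's mass is 1/zetan), and everything else goes through the approximate inverse CDF base + itemcount * (eta*u - eta + 1)^alpha. A stripped-down sketch of the same math with a fixed item count (hypothetical names; it deliberately omits YCSB's incremental-zeta bookkeeping and setLastValue):

import java.util.concurrent.ThreadLocalRandom;

// Minimal illustration of the Gray et al. sampling formula; not YCSB code.
public final class MiniZipf {
  private final long n;        // number of items, ranks 0..n-1
  private final double theta;  // skew constant
  private final double zetan;  // H(n, theta)
  private final double eta;
  private final double alpha;

  MiniZipf(long n, double theta) {
    this.n = n;
    this.theta = theta;
    double sum = 0;
    for (long i = 1; i <= n; i++) {
      sum += 1 / Math.pow(i, theta);
    }
    this.zetan = sum;
    double zeta2 = 1 + Math.pow(0.5, theta); // H(2, theta)
    this.alpha = 1.0 / (1.0 - theta);
    this.eta = (1 - Math.pow(2.0 / n, 1 - theta)) / (1 - zeta2 / zetan);
  }

  long next() {
    double u = ThreadLocalRandom.current().nextDouble();
    double uz = u * zetan;
    if (uz < 1.0) {
      return 0;                             // rank 0: probability 1 / zetan
    }
    if (uz < 1.0 + Math.pow(0.5, theta)) {
      return 1;                             // rank 1: probability (1 / 2^theta) / zetan
    }
    return (long) (n * Math.pow(eta * u - eta + 1, alpha));
  }
}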
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | nextValue | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
* did the second thread think there were only 1000 items? maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note: for large item sets, this is very slow, so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribution. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
 * the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the (min+1)th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {<FILL_FUNCTION_BODY>}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
} |
return nextLong(items);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | main | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
* did the second thread think there were only 1000 items? maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note: for large item sets, this is very slow, so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribution. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
 * the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the (min+1)th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {<FILL_FUNCTION_BODY>}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
}
} |
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
|
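The main body above only exercises the constructor (and therefore the zeta precomputation) and prints nothing. A slightly more informative smoke test, assuming a small item count so it runs quickly, could tally how much probability mass the hottest ranks receive:

// Hypothetical smoke test, not part of YCSB: makes the skew visible by
// counting how often the first few ranks are drawn.
public static void main(String[] args) {
  ZipfianGenerator gen = new ZipfianGenerator(1000); // items 0..999
  int samples = 1_000_000;
  long[] counts = new long[5];
  for (int i = 0; i < samples; i++) {
    long v = gen.nextValue();
    if (v < counts.length) {
      counts[(int) v]++;
    }
  }
  for (int rank = 0; rank < counts.length; rank++) {
    System.out.printf("rank %d: %.2f%%%n", rank, 100.0 * counts[rank] / samples);
  }
}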
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/generator/ZipfianGenerator.java | ZipfianGenerator | mean | class ZipfianGenerator extends NumberGenerator {
public static final double ZIPFIAN_CONSTANT = 0.99;
/**
* Number of items.
*/
private final long items;
/**
* Min item to generate.
*/
private final long base;
/**
* The zipfian constant to use.
*/
private final double zipfianconstant;
/**
* Computed parameters for generating the distribution.
*/
private double alpha, zetan, eta, theta, zeta2theta;
/**
* The number of items used to compute zetan the last time.
*/
private long countforzeta;
/**
* Flag to prevent problems. If you increase the number of items the zipfian generator is allowed to choose from,
* this code will incrementally compute a new zeta value for the larger itemcount. However, if you decrease the
* number of items, the code computes zeta from scratch; this is expensive for large itemsets.
* Usually this is not intentional; e.g. one thread thinks the number of items is 1001 and calls "nextLong()" with
* that item count; then another thread who thinks the number of items is 1000 calls nextLong() with itemcount=1000
* triggering the expensive recomputation. (It is expensive for 100 million items, not really for 1000 items.) Why
* did the second thread think there were only 1000 items? maybe it read the item count before the first thread
* incremented it. So this flag allows you to say if you really do want that recomputation. If true, then the code
* will recompute zeta if the itemcount goes down. If false, the code will assume itemcount only goes up, and never
* recompute.
*/
private boolean allowitemcountdecrease = false;
/******************************* Constructors **************************************/
/**
* Create a zipfian generator for the specified number of items.
* @param items The number of items in the distribution.
*/
public ZipfianGenerator(long items) {
this(0, items - 1);
}
/**
* Create a zipfian generator for items between min and max.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
*/
public ZipfianGenerator(long min, long max) {
this(min, max, ZIPFIAN_CONSTANT);
}
/**
* Create a zipfian generator for the specified number of items using the specified zipfian constant.
*
* @param items The number of items in the distribution.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long items, double zipfianconstant) {
this(0, items - 1, zipfianconstant);
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant.
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant) {
this(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}
/**
* Create a zipfian generator for items between min and max (inclusive) for the specified zipfian constant, using
* the precomputed value of zeta.
*
* @param min The smallest integer to generate in the sequence.
* @param max The largest integer to generate in the sequence.
* @param zipfianconstant The zipfian constant to use.
* @param zetan The precomputed zeta constant.
*/
public ZipfianGenerator(long min, long max, double zipfianconstant, double zetan) {
items = max - min + 1;
base = min;
this.zipfianconstant = zipfianconstant;
theta = this.zipfianconstant;
zeta2theta = zeta(2, theta);
alpha = 1.0 / (1.0 - theta);
this.zetan = zetan;
countforzeta = items;
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / this.zetan);
nextValue();
}
/**************************************************************************/
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant thetaVal. Remember the value of n, so if we change the itemcount, we can recompute zeta.
*
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
*/
double zeta(long n, double thetaVal) {
countforzeta = n;
return zetastatic(n, thetaVal);
}
/**
* Compute the zeta constant needed for the distribution. Do this from scratch for a distribution with n items,
* using the zipfian constant theta. This is a static version of the function which will not remember n.
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
*/
static double zetastatic(long n, double theta) {
return zetastatic(0, n, theta, 0);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant thetaVal. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
*
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param thetaVal The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
double zeta(long st, long n, double thetaVal, double initialsum) {
countforzeta = n;
return zetastatic(st, n, thetaVal, initialsum);
}
/**
* Compute the zeta constant needed for the distribution. Do this incrementally for a distribution that
* has n items now but used to have st items. Use the zipfian constant theta. Remember the new value of
* n so that if we change the itemcount, we'll know to recompute zeta.
* @param st The number of items used to compute the last initialsum
* @param n The number of items to compute zeta over.
* @param theta The zipfian constant.
* @param initialsum The value of zeta we are computing incrementally from.
*/
static double zetastatic(long st, long n, double theta, double initialsum) {
double sum = initialsum;
for (long i = st; i < n; i++) {
sum += 1 / (Math.pow(i + 1, theta));
}
//System.out.println("countforzeta="+countforzeta);
return sum;
}
/****************************************************************************************/
/**
* Generate the next item as a long.
*
* @param itemcount The number of items in the distribution.
* @return The next item in the sequence.
*/
long nextLong(long itemcount) {
//from "Quickly Generating Billion-Record Synthetic Databases", Jim Gray et al, SIGMOD 1994
if (itemcount != countforzeta) {
//have to recompute zetan and eta, since they depend on itemcount
synchronized (this) {
if (itemcount > countforzeta) {
//System.err.println("WARNING: Incrementally recomputing Zipfian distribution. (itemcount="+itemcount+"
// countforzeta="+countforzeta+")");
//we have added more items. can compute zetan incrementally, which is cheaper
zetan = zeta(countforzeta, itemcount, theta, zetan);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
} else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
//have to start over with zetan
//note: for large item sets, this is very slow, so don't do it!
//TODO: can also have a negative incremental computation, e.g. if you decrease the number of items,
// then just subtract the zeta sequence terms for the items that went away. This would be faster than
// recomputing from scratch when the number of items decreases
System.err.println("WARNING: Recomputing Zipfian distribution. This is slow and should be avoided. " +
"(itemcount=" + itemcount + " countforzeta=" + countforzeta + ")");
zetan = zeta(itemcount, theta);
eta = (1 - Math.pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);
}
}
}
double u = ThreadLocalRandom.current().nextDouble();
double uz = u * zetan;
if (uz < 1.0) {
return base;
}
if (uz < 1.0 + Math.pow(0.5, theta)) {
return base + 1;
}
long ret = base + (long) ((itemcount) * Math.pow(eta * u - eta + 1, alpha));
setLastValue(ret);
return ret;
}
/**
* Return the next value, skewed by the Zipfian distribution. The 0th item will be the most popular, followed by
 * the 1st, followed by the 2nd, etc. (Or, if min != 0, the min-th item is the most popular, the (min+1)th item the
* next most popular, etc.) If you want the popular items scattered throughout the item space, use
* ScrambledZipfianGenerator instead.
*/
@Override
public Long nextValue() {
return nextLong(items);
}
public static void main(String[] args) {
new ZipfianGenerator(ScrambledZipfianGenerator.ITEM_COUNT);
}
/**
* @todo Implement ZipfianGenerator.mean()
*/
@Override
public double mean() {<FILL_FUNCTION_BODY>}
} |
throw new UnsupportedOperationException("@todo implement ZipfianGenerator.mean()");
|
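mean() is deliberately left unimplemented above. For reference, the mean has a closed form in the same zeta sums: with P(rank i) = (1/i^theta)/zetan for i = 1..items and the generated value being base + (i - 1), the expectation is base + (sum_{i=1}^{items} (i-1)/i^theta) / zetan. A hedged sketch of a direct O(items) implementation follows; this is an assumption about how the @todo could be filled, not the authors' code, and it reuses the items, base, theta and zetan fields defined above.

// Illustrative only: a direct O(items) computation of the distribution mean.
@Override
public double mean() {
  double weighted = 0;
  for (long i = 1; i <= items; i++) {
    weighted += (i - 1) / Math.pow(i, theta); // (value offset) * unnormalized mass
  }
  return base + weighted / zetan;
}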
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | Measurements | setProperties | class Measurements {
/**
* All supported measurement types are defined in this enum.
*/
public enum MeasurementType {
HISTOGRAM,
HDRHISTOGRAM,
HDRHISTOGRAM_AND_HISTOGRAM,
HDRHISTOGRAM_AND_RAW,
TIMESERIES,
RAW
}
public static final String MEASUREMENT_TYPE_PROPERTY = "measurementtype";
private static final String MEASUREMENT_TYPE_PROPERTY_DEFAULT = "hdrhistogram";
public static final String MEASUREMENT_INTERVAL = "measurement.interval";
private static final String MEASUREMENT_INTERVAL_DEFAULT = "op";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY = "measurement.trackjvm";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT = "false";
private static Measurements singleton = null;
private static Properties measurementproperties = null;
public static void setProperties(Properties props) {<FILL_FUNCTION_BODY>}
/**
* Return the singleton Measurements object.
*/
public static synchronized Measurements getMeasurements() {
if (singleton == null) {
singleton = new Measurements(measurementproperties);
}
return singleton;
}
private final ConcurrentHashMap<String, OneMeasurement> opToMesurementMap;
private final ConcurrentHashMap<String, OneMeasurement> opToIntendedMesurementMap;
private final MeasurementType measurementType;
private final int measurementInterval;
private final Properties props;
/**
* Create a new object with the specified properties.
*/
public Measurements(Properties props) {
opToMesurementMap = new ConcurrentHashMap<>();
opToIntendedMesurementMap = new ConcurrentHashMap<>();
this.props = props;
String mTypeString = this.props.getProperty(MEASUREMENT_TYPE_PROPERTY, MEASUREMENT_TYPE_PROPERTY_DEFAULT);
switch (mTypeString) {
case "histogram":
measurementType = MeasurementType.HISTOGRAM;
break;
case "hdrhistogram":
measurementType = MeasurementType.HDRHISTOGRAM;
break;
case "hdrhistogram+histogram":
measurementType = MeasurementType.HDRHISTOGRAM_AND_HISTOGRAM;
break;
case "hdrhistogram+raw":
measurementType = MeasurementType.HDRHISTOGRAM_AND_RAW;
break;
case "timeseries":
measurementType = MeasurementType.TIMESERIES;
break;
case "raw":
measurementType = MeasurementType.RAW;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_TYPE_PROPERTY + "=" + mTypeString);
}
String mIntervalString = this.props.getProperty(MEASUREMENT_INTERVAL, MEASUREMENT_INTERVAL_DEFAULT);
switch (mIntervalString) {
case "op":
measurementInterval = 0;
break;
case "intended":
measurementInterval = 1;
break;
case "both":
measurementInterval = 2;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_INTERVAL + "=" + mIntervalString);
}
}
private OneMeasurement constructOneMeasurement(String name) {
switch (measurementType) {
case HISTOGRAM:
return new OneMeasurementHistogram(name, props);
case HDRHISTOGRAM:
return new OneMeasurementHdrHistogram(name, props);
case HDRHISTOGRAM_AND_HISTOGRAM:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementHistogram("Bucket" + name, props));
case HDRHISTOGRAM_AND_RAW:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementRaw("Raw" + name, props));
case TIMESERIES:
return new OneMeasurementTimeSeries(name, props);
case RAW:
return new OneMeasurementRaw(name, props);
default:
throw new AssertionError("Impossible to be here. Dead code reached. Bugs?");
}
}
static class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
for (OneMeasurement measurement : opToMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
for (OneMeasurement measurement : opToIntendedMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
}
/**
 * Return a one-line summary of the measurements.
*/
public synchronized String getSummary() {
String ret = "";
for (OneMeasurement m : opToMesurementMap.values()) {
ret += m.getSummary() + " ";
}
for (OneMeasurement m : opToIntendedMesurementMap.values()) {
ret += m.getSummary() + " ";
}
return ret;
}
} |
measurementproperties = props;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | Measurements | getMeasurements | class Measurements {
/**
* All supported measurement types are defined in this enum.
*/
public enum MeasurementType {
HISTOGRAM,
HDRHISTOGRAM,
HDRHISTOGRAM_AND_HISTOGRAM,
HDRHISTOGRAM_AND_RAW,
TIMESERIES,
RAW
}
public static final String MEASUREMENT_TYPE_PROPERTY = "measurementtype";
private static final String MEASUREMENT_TYPE_PROPERTY_DEFAULT = "hdrhistogram";
public static final String MEASUREMENT_INTERVAL = "measurement.interval";
private static final String MEASUREMENT_INTERVAL_DEFAULT = "op";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY = "measurement.trackjvm";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT = "false";
private static Measurements singleton = null;
private static Properties measurementproperties = null;
public static void setProperties(Properties props) {
measurementproperties = props;
}
/**
* Return the singleton Measurements object.
*/
public static synchronized Measurements getMeasurements() {<FILL_FUNCTION_BODY>}
private final ConcurrentHashMap<String, OneMeasurement> opToMesurementMap;
private final ConcurrentHashMap<String, OneMeasurement> opToIntendedMesurementMap;
private final MeasurementType measurementType;
private final int measurementInterval;
private final Properties props;
/**
* Create a new object with the specified properties.
*/
public Measurements(Properties props) {
opToMesurementMap = new ConcurrentHashMap<>();
opToIntendedMesurementMap = new ConcurrentHashMap<>();
this.props = props;
String mTypeString = this.props.getProperty(MEASUREMENT_TYPE_PROPERTY, MEASUREMENT_TYPE_PROPERTY_DEFAULT);
switch (mTypeString) {
case "histogram":
measurementType = MeasurementType.HISTOGRAM;
break;
case "hdrhistogram":
measurementType = MeasurementType.HDRHISTOGRAM;
break;
case "hdrhistogram+histogram":
measurementType = MeasurementType.HDRHISTOGRAM_AND_HISTOGRAM;
break;
case "hdrhistogram+raw":
measurementType = MeasurementType.HDRHISTOGRAM_AND_RAW;
break;
case "timeseries":
measurementType = MeasurementType.TIMESERIES;
break;
case "raw":
measurementType = MeasurementType.RAW;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_TYPE_PROPERTY + "=" + mTypeString);
}
String mIntervalString = this.props.getProperty(MEASUREMENT_INTERVAL, MEASUREMENT_INTERVAL_DEFAULT);
switch (mIntervalString) {
case "op":
measurementInterval = 0;
break;
case "intended":
measurementInterval = 1;
break;
case "both":
measurementInterval = 2;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_INTERVAL + "=" + mIntervalString);
}
}
private OneMeasurement constructOneMeasurement(String name) {
switch (measurementType) {
case HISTOGRAM:
return new OneMeasurementHistogram(name, props);
case HDRHISTOGRAM:
return new OneMeasurementHdrHistogram(name, props);
case HDRHISTOGRAM_AND_HISTOGRAM:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementHistogram("Bucket" + name, props));
case HDRHISTOGRAM_AND_RAW:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementRaw("Raw" + name, props));
case TIMESERIES:
return new OneMeasurementTimeSeries(name, props);
case RAW:
return new OneMeasurementRaw(name, props);
default:
throw new AssertionError("Impossible to be here. Dead code reached. Bugs?");
}
}
static class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
for (OneMeasurement measurement : opToMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
for (OneMeasurement measurement : opToIntendedMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
}
/**
 * Return a one-line summary of the measurements.
*/
public synchronized String getSummary() {
String ret = "";
for (OneMeasurement m : opToMesurementMap.values()) {
ret += m.getSummary() + " ";
}
for (OneMeasurement m : opToIntendedMesurementMap.values()) {
ret += m.getSummary() + " ";
}
return ret;
}
} |
if (singleton == null) {
singleton = new Measurements(measurementproperties);
}
return singleton;
|
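setProperties and getMeasurements together form a lazily initialized, process-wide singleton: the properties must be installed before the first getMeasurements call, because a later setProperties has no effect once the instance exists. A minimal usage sketch (the property keys come from the constants above; Status.OK is assumed from site.ycsb):

import java.util.Properties;
import site.ycsb.Status;
import site.ycsb.measurements.Measurements;

// Hypothetical driver, not part of YCSB: shows the configure-then-resolve order.
public final class MeasurementsDemo {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(Measurements.MEASUREMENT_TYPE_PROPERTY, "hdrhistogram");
    props.setProperty(Measurements.MEASUREMENT_INTERVAL, "both");
    Measurements.setProperties(props);   // must precede the first getMeasurements()

    Measurements m = Measurements.getMeasurements();
    m.measure("READ", 1200);             // one latency sample for the READ metric
    m.reportStatus("READ", Status.OK);   // one return code for the same operation
    System.out.println(m.getSummary());
  }
}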
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | Measurements | constructOneMeasurement | class Measurements {
/**
* All supported measurement types are defined in this enum.
*/
public enum MeasurementType {
HISTOGRAM,
HDRHISTOGRAM,
HDRHISTOGRAM_AND_HISTOGRAM,
HDRHISTOGRAM_AND_RAW,
TIMESERIES,
RAW
}
public static final String MEASUREMENT_TYPE_PROPERTY = "measurementtype";
private static final String MEASUREMENT_TYPE_PROPERTY_DEFAULT = "hdrhistogram";
public static final String MEASUREMENT_INTERVAL = "measurement.interval";
private static final String MEASUREMENT_INTERVAL_DEFAULT = "op";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY = "measurement.trackjvm";
public static final String MEASUREMENT_TRACK_JVM_PROPERTY_DEFAULT = "false";
private static Measurements singleton = null;
private static Properties measurementproperties = null;
public static void setProperties(Properties props) {
measurementproperties = props;
}
/**
* Return the singleton Measurements object.
*/
public static synchronized Measurements getMeasurements() {
if (singleton == null) {
singleton = new Measurements(measurementproperties);
}
return singleton;
}
private final ConcurrentHashMap<String, OneMeasurement> opToMesurementMap;
private final ConcurrentHashMap<String, OneMeasurement> opToIntendedMesurementMap;
private final MeasurementType measurementType;
private final int measurementInterval;
private final Properties props;
/**
* Create a new object with the specified properties.
*/
public Measurements(Properties props) {
opToMesurementMap = new ConcurrentHashMap<>();
opToIntendedMesurementMap = new ConcurrentHashMap<>();
this.props = props;
String mTypeString = this.props.getProperty(MEASUREMENT_TYPE_PROPERTY, MEASUREMENT_TYPE_PROPERTY_DEFAULT);
switch (mTypeString) {
case "histogram":
measurementType = MeasurementType.HISTOGRAM;
break;
case "hdrhistogram":
measurementType = MeasurementType.HDRHISTOGRAM;
break;
case "hdrhistogram+histogram":
measurementType = MeasurementType.HDRHISTOGRAM_AND_HISTOGRAM;
break;
case "hdrhistogram+raw":
measurementType = MeasurementType.HDRHISTOGRAM_AND_RAW;
break;
case "timeseries":
measurementType = MeasurementType.TIMESERIES;
break;
case "raw":
measurementType = MeasurementType.RAW;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_TYPE_PROPERTY + "=" + mTypeString);
}
String mIntervalString = this.props.getProperty(MEASUREMENT_INTERVAL, MEASUREMENT_INTERVAL_DEFAULT);
switch (mIntervalString) {
case "op":
measurementInterval = 0;
break;
case "intended":
measurementInterval = 1;
break;
case "both":
measurementInterval = 2;
break;
default:
throw new IllegalArgumentException("unknown " + MEASUREMENT_INTERVAL + "=" + mIntervalString);
}
}
private OneMeasurement constructOneMeasurement(String name) {<FILL_FUNCTION_BODY>}
static class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
for (OneMeasurement measurement : opToMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
for (OneMeasurement measurement : opToIntendedMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
}
/**
 * Return a one-line summary of the measurements.
*/
public synchronized String getSummary() {
String ret = "";
for (OneMeasurement m : opToMesurementMap.values()) {
ret += m.getSummary() + " ";
}
for (OneMeasurement m : opToIntendedMesurementMap.values()) {
ret += m.getSummary() + " ";
}
return ret;
}
} |
switch (measurementType) {
case HISTOGRAM:
return new OneMeasurementHistogram(name, props);
case HDRHISTOGRAM:
return new OneMeasurementHdrHistogram(name, props);
case HDRHISTOGRAM_AND_HISTOGRAM:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementHistogram("Bucket" + name, props));
case HDRHISTOGRAM_AND_RAW:
return new TwoInOneMeasurement(name,
new OneMeasurementHdrHistogram("Hdr" + name, props),
new OneMeasurementRaw("Raw" + name, props));
case TIMESERIES:
return new OneMeasurementTimeSeries(name, props);
case RAW:
return new OneMeasurementRaw(name, props);
default:
throw new AssertionError("Impossible to be here. Dead code reached. Bugs?");
}
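// Editor's sketch (not YCSB source): the switch above dispatches on the
// "measurementtype" property parsed in this class's constructor. A minimal
// usage example, assuming YCSB's static Measurements.setProperties(...) and
// Measurements.getMeasurements() accessors:
Properties props = new Properties();
props.setProperty("measurementtype", "hdrhistogram");
Measurements.setProperties(props);
Measurements.getMeasurements().measure("READ", 500); // latency in microseconds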
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | startTime | class StartTimeHolder {
protected long time;
long startTime() {<FILL_FUNCTION_BODY>}
} |
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | initialValue | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {<FILL_FUNCTION_BODY> |
return new StartTimeHolder();
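// Editor's note: since Java 8 the anonymous ThreadLocal subclass above can be
// written as an equivalent one-liner (sketch, behavior unchanged):
ThreadLocal<StartTimeHolder> tl = ThreadLocal.withInitial(StartTimeHolder::new);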
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | setIntendedStartTimeNs | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {<FILL_FUNCTION_BODY> |
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | getIntendedStartTimeNs | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {<FILL_FUNCTION_BODY> |
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | measure | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {<FILL_FUNCTION_BODY> |
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
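// Editor's sketch of the typical caller (db/table/key/fields/result are
// hypothetical variables here): time one operation with System.nanoTime(),
// convert to microseconds, and report both the latency and the status.
long start = System.nanoTime();
Status status = db.read(table, key, fields, result);
long end = System.nanoTime();
Measurements mm = Measurements.getMeasurements();
mm.measure("READ", (int) ((end - start) / 1000));
mm.reportStatus("READ", status);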
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | measureIntended | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {<FILL_FUNCTION_BODY> |
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
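// Editor's sketch (illustrative): intended latency is measured from the
// scheduled start time rather than the actual one, which compensates for
// coordinated omission when the client falls behind its target schedule.
Measurements ms = Measurements.getMeasurements();
long done = System.nanoTime();
ms.measureIntended("READ", (int) ((done - ms.getIntendedStartTimeNs()) / 1000));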
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | getOpMeasurement | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {<FILL_FUNCTION_BODY> |
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
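// Editor's note: the get/putIfAbsent dance above predates Java 8; an
// equivalent sketch with computeIfAbsent (the factory may then run under the
// map's internal lock, which is acceptable for this cheap constructor):
OneMeasurement one = opToMesurementMap.computeIfAbsent(operation, this::constructOneMeasurement);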
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | getOpIntendedMeasurement | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {<FILL_FUNCTION_BODY> |
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | reportStatus | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {<FILL_FUNCTION_BODY> |
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | exportMeasurements | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {<FILL_FUNCTION_BODY> |
for (OneMeasurement measurement : opToMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
for (OneMeasurement measurement : opToIntendedMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
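// Editor's sketch: exporting to stdout. TextMeasurementsExporter is YCSB's
// default exporter; the try-with-resources form assumes MeasurementsExporter
// extends Closeable, as it does in YCSB core.
try (MeasurementsExporter exporter = new TextMeasurementsExporter(System.out)) {
  Measurements.getMeasurements().exportMeasurements(exporter);
}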
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/Measurements.java | StartTimeHolder | getSummary | class StartTimeHolder {
protected long time;
long startTime() {
if (time == 0) {
return System.nanoTime();
} else {
return time;
}
}
}
private final ThreadLocal<StartTimeHolder> tlIntendedStartTime = new ThreadLocal<Measurements.StartTimeHolder>() {
protected StartTimeHolder initialValue() {
return new StartTimeHolder();
}
};
public void setIntendedStartTimeNs(long time) {
if (measurementInterval == 0) {
return;
}
tlIntendedStartTime.get().time = time;
}
public long getIntendedStartTimeNs() {
if (measurementInterval == 0) {
return 0L;
}
return tlIntendedStartTime.get().startTime();
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measure(String operation, int latency) {
if (measurementInterval == 1) {
return;
}
try {
OneMeasurement m = getOpMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
/**
* Report a single value of a single metric. E.g. for read latency, operation="READ" and latency is the measured
* value.
*/
public void measureIntended(String operation, int latency) {
if (measurementInterval == 0) {
return;
}
try {
OneMeasurement m = getOpIntendedMeasurement(operation);
m.measure(latency);
} catch (java.lang.ArrayIndexOutOfBoundsException e) {
// This seems like a terribly hacky way to cover up for a bug in the measurement code
System.out.println("ERROR: java.lang.ArrayIndexOutOfBoundsException - ignoring and continuing");
e.printStackTrace();
e.printStackTrace(System.out);
}
}
private OneMeasurement getOpMeasurement(String operation) {
OneMeasurement m = opToMesurementMap.get(operation);
if (m == null) {
m = constructOneMeasurement(operation);
OneMeasurement oldM = opToMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
private OneMeasurement getOpIntendedMeasurement(String operation) {
OneMeasurement m = opToIntendedMesurementMap.get(operation);
if (m == null) {
final String name = measurementInterval == 1 ? operation : "Intended-" + operation;
m = constructOneMeasurement(name);
OneMeasurement oldM = opToIntendedMesurementMap.putIfAbsent(operation, m);
if (oldM != null) {
m = oldM;
}
}
return m;
}
/**
* Report a return code for a single DB operation.
*/
public void reportStatus(final String operation, final Status status) {
OneMeasurement m = measurementInterval == 1 ?
getOpIntendedMeasurement(operation) :
getOpMeasurement(operation);
m.reportStatus(status);
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
for (OneMeasurement measurement : opToMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
for (OneMeasurement measurement : opToIntendedMesurementMap.values()) {
measurement.exportMeasurements(exporter);
}
}
/**
* Return a one line summary of the measurements.
*/
public synchronized String getSummary() {<FILL_FUNCTION_BODY> |
String ret = "";
for (OneMeasurement m : opToMesurementMap.values()) {
ret += m.getSummary() + " ";
}
for (OneMeasurement m : opToIntendedMesurementMap.values()) {
ret += m.getSummary() + " ";
}
return ret;
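// Editor's note: string concatenation in a loop reallocates on every pass;
// an equivalent sketch of the same method body using StringBuilder:
StringBuilder sb = new StringBuilder();
for (OneMeasurement m : opToMesurementMap.values()) {
  sb.append(m.getSummary()).append(' ');
}
for (OneMeasurement m : opToIntendedMesurementMap.values()) {
  sb.append(m.getSummary()).append(' ');
}
return sb.toString();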
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurement.java | OneMeasurement | getName | class OneMeasurement {
private final String name;
private final ConcurrentHashMap<Status, AtomicInteger> returncodes;
public String getName() {<FILL_FUNCTION_BODY>}
/**
* @param name measurement name
*/
public OneMeasurement(String name) {
this.name = name;
this.returncodes = new ConcurrentHashMap<>();
}
public abstract void measure(int latency);
public abstract String getSummary();
/**
* No need for synchronization, using CHM to deal with that.
*/
public void reportStatus(Status status) {
AtomicInteger counter = returncodes.get(status);
if (counter == null) {
counter = new AtomicInteger();
AtomicInteger other = returncodes.putIfAbsent(status, counter);
if (other != null) {
counter = other;
}
}
counter.incrementAndGet();
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public abstract void exportMeasurements(MeasurementsExporter exporter) throws IOException;
protected final void exportStatusCounts(MeasurementsExporter exporter) throws IOException {
for (Map.Entry<Status, AtomicInteger> entry : returncodes.entrySet()) {
exporter.write(getName(), "Return=" + entry.getKey().getName(), entry.getValue().get());
}
}
} |
return name;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurement.java | OneMeasurement | reportStatus | class OneMeasurement {
private final String name;
private final ConcurrentHashMap<Status, AtomicInteger> returncodes;
public String getName() {
return name;
}
/**
* @param name measurement name
*/
public OneMeasurement(String name) {
this.name = name;
this.returncodes = new ConcurrentHashMap<>();
}
public abstract void measure(int latency);
public abstract String getSummary();
/**
* No need for synchronization, using CHM to deal with that.
*/
public void reportStatus(Status status) {<FILL_FUNCTION_BODY>}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public abstract void exportMeasurements(MeasurementsExporter exporter) throws IOException;
protected final void exportStatusCounts(MeasurementsExporter exporter) throws IOException {
for (Map.Entry<Status, AtomicInteger> entry : returncodes.entrySet()) {
exporter.write(getName(), "Return=" + entry.getKey().getName(), entry.getValue().get());
}
}
} |
AtomicInteger counter = returncodes.get(status);
if (counter == null) {
counter = new AtomicInteger();
AtomicInteger other = returncodes.putIfAbsent(status, counter);
if (other != null) {
counter = other;
}
}
counter.incrementAndGet();
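// Editor's note: an equivalent modern sketch; java.util.concurrent.atomic.LongAdder
// scales better than AtomicInteger under heavy write contention:
private final ConcurrentHashMap<Status, LongAdder> codes = new ConcurrentHashMap<>();
public void reportStatus(Status status) {
  codes.computeIfAbsent(status, s -> new LongAdder()).increment();
}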
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurement.java | OneMeasurement | exportStatusCounts | class OneMeasurement {
private final String name;
private final ConcurrentHashMap<Status, AtomicInteger> returncodes;
public String getName() {
return name;
}
/**
* @param name measurement name
*/
public OneMeasurement(String name) {
this.name = name;
this.returncodes = new ConcurrentHashMap<>();
}
public abstract void measure(int latency);
public abstract String getSummary();
/**
* No need for synchronization, using CHM to deal with that.
*/
public void reportStatus(Status status) {
AtomicInteger counter = returncodes.get(status);
if (counter == null) {
counter = new AtomicInteger();
AtomicInteger other = returncodes.putIfAbsent(status, counter);
if (other != null) {
counter = other;
}
}
counter.incrementAndGet();
}
/**
* Export the current measurements to a suitable format.
*
* @param exporter Exporter representing the type of format to write to.
* @throws IOException Thrown if the export failed.
*/
public abstract void exportMeasurements(MeasurementsExporter exporter) throws IOException;
protected final void exportStatusCounts(MeasurementsExporter exporter) throws IOException {<FILL_FUNCTION_BODY>}
} |
for (Map.Entry<Status, AtomicInteger> entry : returncodes.entrySet()) {
exporter.write(getName(), "Return=" + entry.getKey().getName(), entry.getValue().get());
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java | OneMeasurementHdrHistogram | measure | class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
* Latency is reported in microseconds.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {<FILL_FUNCTION_BODY>}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
}
/**
* This is called periodically from the StatusThread. There's a single
* StatusThread per Client process. We optionally serialize the interval to
* log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
}
private Histogram getIntervalHistogramAndAccumulate() {
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
}
/**
* Helper method to parse the given percentile value string.
*
* @param percentileString - comma-delimited string of numeric percentile values
* @return a List of the percentile values as Doubles
*/
private List<Double> getPercentileValues(String percentileString) {
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
}
/**
* Helper method to find the ordinal of any number, e.g. 1 -> 1st.
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
Integer j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
}
} |
histogram.recordValue(latencyInMicros);
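// Editor's sketch of the org.HdrHistogram Recorder pattern used above:
// worker threads record concurrently while a single reader periodically
// drains a consistent interval snapshot without pausing the writers.
Recorder recorder = new Recorder(3);                  // 3 significant digits
recorder.recordValue(1234);                           // from any worker thread
Histogram interval = recorder.getIntervalHistogram(); // from the status thread
long p99 = interval.getValueAtPercentile(99.0);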
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java | OneMeasurementHdrHistogram | exportMeasurements | class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
* Latency is reported in microseconds.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {
histogram.recordValue(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {<FILL_FUNCTION_BODY>}
/**
* This is called periodically from the StatusThread. There's a single
* StatusThread per Client process. We optionally serialize the interval to
* log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
}
private Histogram getIntervalHistogramAndAccumulate() {
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
}
/**
* Helper method to parse the given percentile value string.
*
* @param percentileString - comma-delimited string of numeric percentile values
* @return a List of the percentile values as Doubles
*/
private List<Double> getPercentileValues(String percentileString) {
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
}
/**
* Helper method to find the ordinal of any number, e.g. 1 -> 1st.
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
Integer j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
}
} |
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java | OneMeasurementHdrHistogram | getSummary | class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
* Latency is reported in microseconds.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {
histogram.recordValue(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
}
/**
* This is called periodically from the StatusThread. There's a single
* StatusThread per Client process. We optionally serialize the interval to
* log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {<FILL_FUNCTION_BODY>}
private Histogram getIntervalHistogramAndAccumulate() {
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
}
/**
* Helper method to parse the given percentile value string.
*
* @param percentileString - comma-delimited string of numeric percentile values
* @return a List of the percentile values as Doubles
*/
private List<Double> getPercentileValues(String percentileString) {
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
}
/**
* Helper method to find the ordinal of any number, e.g. 1 -> 1st.
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
Integer j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
}
} |
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java | OneMeasurementHdrHistogram | getIntervalHistogramAndAccumulate | class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
* Latency is reported in microseconds.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {
histogram.recordValue(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
}
/**
* This is called periodically from the StatusThread. There's a single
* StatusThread per Client process. We optionally serialize the interval to
* log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
}
private Histogram getIntervalHistogramAndAccumulate() {<FILL_FUNCTION_BODY>}
/**
* Helper method to parse the given percentile value string.
*
* @param percentileString - comma-delimited string of numeric percentile values
* @return a List of the percentile values as Doubles
*/
private List<Double> getPercentileValues(String percentileString) {
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
}
/**
* Helper method to find the ordinal of any number, e.g. 1 -> 1st.
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
Integer j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
}
} |
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
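// Editor's note: Histogram.add merges per-bucket counts, so totalHistogram
// accumulates everything ever drained, while each returned interval
// histogram covers only the window since the previous drain.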
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java | OneMeasurementHdrHistogram | getPercentileValues | class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
* Latency is reported in microseconds.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {
histogram.recordValue(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
}
/**
* This is called periodically from the StatusThread. There's a single
* StatusThread per Client process. We optionally serialize the interval to
* log on this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
}
private Histogram getIntervalHistogramAndAccumulate() {
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
}
/**
* Helper method to parse the given percentile value string.
*
* @param percentileString - comma-delimited string of numeric percentile values
* @return a List of the percentile values as Doubles
*/
private List<Double> getPercentileValues(String percentileString) {<FILL_FUNCTION_BODY>}
/**
* Helper method to find the ordinal of any number, e.g. 1 -> 1st.
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
Integer j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
}
} |
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
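// Editor's examples (sketch) for the two helpers above:
//   getPercentileValues("95,99,99.9") -> [95.0, 99.0, 99.9]
//   getPercentileValues("oops")       -> warns, falls back to [95.0, 99.0]
//   ordinal(95.0) -> "95th"   ordinal(21.0) -> "21st"   ordinal(12.0) -> "12th"
//   ordinal(99.9) -> "99.9"   (non-integral percentiles keep their decimal form)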
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHdrHistogram.java | OneMeasurementHdrHistogram | ordinal | class OneMeasurementHdrHistogram extends OneMeasurement {
// we need one log per measurement histogram
private final PrintStream log;
private final HistogramLogWriter histogramLogWriter;
private final Recorder histogram;
private Histogram totalHistogram;
/**
* The name of the property for deciding what percentile values to output.
*/
public static final String PERCENTILES_PROPERTY = "hdrhistogram.percentiles";
/**
* The default value for the hdrhistogram.percentiles property.
*/
public static final String PERCENTILES_PROPERTY_DEFAULT = "95,99";
/**
* The name of the property for determining if we should print out the buckets.
*/
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
private final List<Double> percentiles;
public OneMeasurementHdrHistogram(String name, Properties props) {
super(name);
percentiles = getPercentileValues(props.getProperty(PERCENTILES_PROPERTY, PERCENTILES_PROPERTY_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
boolean shouldLog = Boolean.parseBoolean(props.getProperty("hdrhistogram.fileoutput", "false"));
if (!shouldLog) {
log = null;
histogramLogWriter = null;
} else {
try {
final String hdrOutputFilename = props.getProperty("hdrhistogram.output.path", "") + name + ".hdr";
log = new PrintStream(new FileOutputStream(hdrOutputFilename), false);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open hdr histogram output file", e);
}
histogramLogWriter = new HistogramLogWriter(log);
histogramLogWriter.outputComment("[Logging for: " + name + "]");
histogramLogWriter.outputLogFormatVersion();
long now = System.currentTimeMillis();
histogramLogWriter.outputStartTime(now);
histogramLogWriter.setBaseTime(now);
histogramLogWriter.outputLegend();
}
histogram = new Recorder(3);
}
/**
 * Latency is reported in microseconds.
* Using {@link Recorder} to support concurrent updates to histogram.
*/
public void measure(int latencyInMicros) {
histogram.recordValue(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
// accumulate the last interval which was not caught by status thread
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
// we can close now
log.close();
}
exporter.write(getName(), "Operations", totalHistogram.getTotalCount());
exporter.write(getName(), "AverageLatency(us)", totalHistogram.getMean());
exporter.write(getName(), "MinLatency(us)", totalHistogram.getMinValue());
exporter.write(getName(), "MaxLatency(us)", totalHistogram.getMaxValue());
for (Double percentile : percentiles) {
exporter.write(getName(), ordinal(percentile) + "PercentileLatency(us)",
totalHistogram.getValueAtPercentile(percentile));
}
exportStatusCounts(exporter);
// also export totalHistogram
if (verbose) {
for (HistogramIterationValue v : totalHistogram.recordedValues()) {
int value;
if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) {
value = Integer.MAX_VALUE;
} else {
value = (int)v.getValueIteratedTo();
}
exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo());
}
}
}
/**
* This is called periodically from the StatusThread. There's a single
 * StatusThread per Client process. We optionally serialize the interval
 * histogram to the log file at this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
Histogram intervalHistogram = getIntervalHistogramAndAccumulate();
// we use the summary interval as the histogram file interval.
if (histogramLogWriter != null) {
histogramLogWriter.outputIntervalHistogram(intervalHistogram);
}
DecimalFormat d = new DecimalFormat("#.##");
return "[" + getName() + ": Count=" + intervalHistogram.getTotalCount() + ", Max="
+ intervalHistogram.getMaxValue() + ", Min=" + intervalHistogram.getMinValue() + ", Avg="
+ d.format(intervalHistogram.getMean()) + ", 90=" + d.format(intervalHistogram.getValueAtPercentile(90))
+ ", 99=" + d.format(intervalHistogram.getValueAtPercentile(99)) + ", 99.9="
+ d.format(intervalHistogram.getValueAtPercentile(99.9)) + ", 99.99="
+ d.format(intervalHistogram.getValueAtPercentile(99.99)) + "]";
}
private Histogram getIntervalHistogramAndAccumulate() {
Histogram intervalHistogram = histogram.getIntervalHistogram();
// add this to the total time histogram.
if (totalHistogram == null) {
totalHistogram = intervalHistogram;
} else {
totalHistogram.add(intervalHistogram);
}
return intervalHistogram;
}
/**
* Helper method to parse the given percentile value string.
*
 * @param percentileString - comma-delimited string of numeric percentile values
 * @return a List of Double percentile values
*/
private List<Double> getPercentileValues(String percentileString) {
List<Double> percentileValues = new ArrayList<>();
try {
for (String rawPercentile : percentileString.split(",")) {
percentileValues.add(Double.parseDouble(rawPercentile));
}
} catch (Exception e) {
// If the given hdrhistogram.percentiles value is unreadable for whatever reason,
// then calculate and return the default set.
System.err.println("[WARN] Couldn't read " + PERCENTILES_PROPERTY + " value: '" + percentileString +
"', the default of '" + PERCENTILES_PROPERTY_DEFAULT + "' will be used.");
e.printStackTrace();
return getPercentileValues(PERCENTILES_PROPERTY_DEFAULT);
}
return percentileValues;
}
/**
 * Helper method to find the ordinal suffix of a number, e.g. 1 -> 1st.
* @param i number
* @return ordinal string
*/
private String ordinal(Double i) {<FILL_FUNCTION_BODY>}
} |
String[] suffixes = new String[]{"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
int j = i.intValue();
if (i % 1 == 0) {
switch (j % 100) {
case 11:
case 12:
case 13:
return j + "th";
default:
return j + suffixes[j % 10];
}
} else {
return i.toString();
}
|
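// Worked examples (editor addition, not YCSB source): expected ordinal() results,
// reproduced with the same suffix table and 11/12/13 special case as the method above.
public final class OrdinalExample {
  public static void main(String[] args) {
    String[] suffixes = {"th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th"};
    for (double p : new double[]{1, 11, 22, 95, 99.9}) {
      int j = (int) p;
      String s = (p % 1 != 0) ? Double.toString(p)              // 99.9 -> "99.9", printed verbatim
          : (j % 100 >= 11 && j % 100 <= 13) ? j + "th"         // 11 -> "11th", not "11st"
          : j + suffixes[j % 10];                               // 1 -> "1st", 22 -> "22nd"
      System.out.println(s); // prints: 1st, 11th, 22nd, 95th, 99.9
    }
  }
}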
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java | OneMeasurementHistogram | measure | class OneMeasurementHistogram extends OneMeasurement {
public static final String BUCKETS = "histogram.buckets";
public static final String BUCKETS_DEFAULT = "1000";
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Specify the range of latencies to track in the histogram.
*/
private final int buckets;
/**
* Groups operations in discrete blocks of 1ms width.
*/
private long[] histogram;
/**
* Counts all operations outside the histogram's range.
*/
private long histogramoverflow;
/**
* The total number of reported operations.
*/
private long operations;
/**
* The sum of each latency measurement over all operations.
 * Calculated in us.
*/
private long totallatency;
/**
* The sum of each latency measurement squared over all operations.
* Used to calculate variance of latency.
 * Calculated in us.
*/
private double totalsquaredlatency;
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
//keep a windowed version of these stats for printing status
private long windowoperations;
private long windowtotallatency;
private int min;
private int max;
public OneMeasurementHistogram(String name, Properties props) {
super(name);
buckets = Integer.parseInt(props.getProperty(BUCKETS, BUCKETS_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
histogram = new long[buckets];
histogramoverflow = 0;
operations = 0;
totallatency = 0;
totalsquaredlatency = 0;
windowoperations = 0;
windowtotallatency = 0;
min = -1;
max = -1;
}
/* (non-Javadoc)
* @see site.ycsb.OneMeasurement#measure(int)
*/
public synchronized void measure(int latency) {<FILL_FUNCTION_BODY>}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
double mean = totallatency / ((double) operations);
double variance = totalsquaredlatency / ((double) operations) - (mean * mean);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", mean);
exporter.write(getName(), "LatencyVariance(us)", variance);
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
long opcounter = 0;
boolean done95th = false;
for (int i = 0; i < buckets; i++) {
opcounter += histogram[i];
if ((!done95th) && (((double) opcounter) / ((double) operations) >= 0.95)) {
exporter.write(getName(), "95thPercentileLatency(us)", i * 1000);
done95th = true;
}
if (((double) opcounter) / ((double) operations) >= 0.99) {
exporter.write(getName(), "99thPercentileLatency(us)", i * 1000);
break;
}
}
exportStatusCounts(exporter);
if (verbose) {
for (int i = 0; i < buckets; i++) {
exporter.write(getName(), Integer.toString(i), histogram[i]);
}
exporter.write(getName(), ">" + buckets, histogramoverflow);
}
}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
} |
//latency reported in us and collected in bucket by ms.
if (latency / 1000 >= buckets) {
histogramoverflow++;
} else {
histogram[latency / 1000]++;
}
operations++;
totallatency += latency;
totalsquaredlatency += ((double) latency) * ((double) latency);
windowoperations++;
windowtotallatency += latency;
if ((min < 0) || (latency < min)) {
min = latency;
}
if ((max < 0) || (latency > max)) {
max = latency;
}
|
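// Worked arithmetic (editor addition, not YCSB source): measure() receives latency
// in microseconds and selects a 1ms-wide bucket by integer division. The numbers
// below are made-up examples.
public final class BucketIndexExample {
  public static void main(String[] args) {
    int buckets = 1000;            // histogram.buckets default
    int latencyUs = 2500;          // 2.5 ms
    int bucket = latencyUs / 1000; // -> 2, i.e. the [2ms, 3ms) bucket
    System.out.println(bucket < buckets ? "bucket " + bucket : "overflow");
    // Any latency >= 1,000,000 us (1 s) would land in histogramoverflow instead.
  }
}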
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java | OneMeasurementHistogram | exportMeasurements | class OneMeasurementHistogram extends OneMeasurement {
public static final String BUCKETS = "histogram.buckets";
public static final String BUCKETS_DEFAULT = "1000";
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Specify the range of latencies to track in the histogram.
*/
private final int buckets;
/**
* Groups operations in discrete blocks of 1ms width.
*/
private long[] histogram;
/**
* Counts all operations outside the histogram's range.
*/
private long histogramoverflow;
/**
* The total number of reported operations.
*/
private long operations;
/**
* The sum of each latency measurement over all operations.
 * Calculated in us.
*/
private long totallatency;
/**
* The sum of each latency measurement squared over all operations.
* Used to calculate variance of latency.
 * Calculated in us.
*/
private double totalsquaredlatency;
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
//keep a windowed version of these stats for printing status
private long windowoperations;
private long windowtotallatency;
private int min;
private int max;
public OneMeasurementHistogram(String name, Properties props) {
super(name);
buckets = Integer.parseInt(props.getProperty(BUCKETS, BUCKETS_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
histogram = new long[buckets];
histogramoverflow = 0;
operations = 0;
totallatency = 0;
totalsquaredlatency = 0;
windowoperations = 0;
windowtotallatency = 0;
min = -1;
max = -1;
}
/* (non-Javadoc)
* @see site.ycsb.OneMeasurement#measure(int)
*/
public synchronized void measure(int latency) {
//latency reported in us and collected in bucket by ms.
if (latency / 1000 >= buckets) {
histogramoverflow++;
} else {
histogram[latency / 1000]++;
}
operations++;
totallatency += latency;
totalsquaredlatency += ((double) latency) * ((double) latency);
windowoperations++;
windowtotallatency += latency;
if ((min < 0) || (latency < min)) {
min = latency;
}
if ((max < 0) || (latency > max)) {
max = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
} |
double mean = totallatency / ((double) operations);
double variance = totalsquaredlatency / ((double) operations) - (mean * mean);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", mean);
exporter.write(getName(), "LatencyVariance(us)", variance);
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
long opcounter = 0;
boolean done95th = false;
for (int i = 0; i < buckets; i++) {
opcounter += histogram[i];
if ((!done95th) && (((double) opcounter) / ((double) operations) >= 0.95)) {
exporter.write(getName(), "95thPercentileLatency(us)", i * 1000);
done95th = true;
}
if (((double) opcounter) / ((double) operations) >= 0.99) {
exporter.write(getName(), "99thPercentileLatency(us)", i * 1000);
break;
}
}
exportStatusCounts(exporter);
if (verbose) {
for (int i = 0; i < buckets; i++) {
exporter.write(getName(), Integer.toString(i), histogram[i]);
}
exporter.write(getName(), ">" + buckets, histogramoverflow);
}
|
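// Worked example (editor addition, not YCSB source): how the cumulative scan in
// exportMeasurements locates the 95th percentile. The bucket counts are invented
// for illustration.
public final class PercentileScanExample {
  public static void main(String[] args) {
    long[] histogram = new long[1000];
    histogram[0] = 90; histogram[1] = 4; histogram[2] = 6; // 100 ops total
    long operations = 100;
    long opcounter = 0;
    for (int i = 0; i < histogram.length; i++) {
      opcounter += histogram[i];
      // cumulative fractions: 0.90, 0.94, 1.00 -> the first >= 0.95 is bucket 2
      if ((double) opcounter / (double) operations >= 0.95) {
        System.out.println("95thPercentileLatency(us) = " + i * 1000); // 2000
        break;
      }
    }
  }
}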
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementHistogram.java | OneMeasurementHistogram | getSummary | class OneMeasurementHistogram extends OneMeasurement {
public static final String BUCKETS = "histogram.buckets";
public static final String BUCKETS_DEFAULT = "1000";
public static final String VERBOSE_PROPERTY = "measurement.histogram.verbose";
/**
* Specify the range of latencies to track in the histogram.
*/
private final int buckets;
/**
* Groups operations in discrete blocks of 1ms width.
*/
private long[] histogram;
/**
* Counts all operations outside the histogram's range.
*/
private long histogramoverflow;
/**
* The total number of reported operations.
*/
private long operations;
/**
* The sum of each latency measurement over all operations.
 * Calculated in us.
*/
private long totallatency;
/**
* The sum of each latency measurement squared over all operations.
* Used to calculate variance of latency.
 * Calculated in us.
*/
private double totalsquaredlatency;
/**
* Whether or not to emit the histogram buckets.
*/
private final boolean verbose;
//keep a windowed version of these stats for printing status
private long windowoperations;
private long windowtotallatency;
private int min;
private int max;
public OneMeasurementHistogram(String name, Properties props) {
super(name);
buckets = Integer.parseInt(props.getProperty(BUCKETS, BUCKETS_DEFAULT));
verbose = Boolean.valueOf(props.getProperty(VERBOSE_PROPERTY, String.valueOf(false)));
histogram = new long[buckets];
histogramoverflow = 0;
operations = 0;
totallatency = 0;
totalsquaredlatency = 0;
windowoperations = 0;
windowtotallatency = 0;
min = -1;
max = -1;
}
/* (non-Javadoc)
* @see site.ycsb.OneMeasurement#measure(int)
*/
public synchronized void measure(int latency) {
//latency reported in us and collected in bucket by ms.
if (latency / 1000 >= buckets) {
histogramoverflow++;
} else {
histogram[latency / 1000]++;
}
operations++;
totallatency += latency;
totalsquaredlatency += ((double) latency) * ((double) latency);
windowoperations++;
windowtotallatency += latency;
if ((min < 0) || (latency < min)) {
min = latency;
}
if ((max < 0) || (latency > max)) {
max = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
double mean = totallatency / ((double) operations);
double variance = totalsquaredlatency / ((double) operations) - (mean * mean);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", mean);
exporter.write(getName(), "LatencyVariance(us)", variance);
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
long opcounter = 0;
boolean done95th = false;
for (int i = 0; i < buckets; i++) {
opcounter += histogram[i];
if ((!done95th) && (((double) opcounter) / ((double) operations) >= 0.95)) {
exporter.write(getName(), "95thPercentileLatency(us)", i * 1000);
done95th = true;
}
if (((double) opcounter) / ((double) operations) >= 0.99) {
exporter.write(getName(), "99thPercentileLatency(us)", i * 1000);
break;
}
}
exportStatusCounts(exporter);
if (verbose) {
for (int i = 0; i < buckets; i++) {
exporter.write(getName(), Integer.toString(i), histogram[i]);
}
exporter.write(getName(), ">" + buckets, histogramoverflow);
}
}
@Override
public String getSummary() {<FILL_FUNCTION_BODY>}
} |
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
|
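// Behavior sketch (editor addition, not YCSB source): the window counters reset on
// every getSummary() call, so each status line covers only the interval since the
// previous one. Assumes the OneMeasurementHistogram class above is on the
// classpath; the latency value is made up and the empty Properties use defaults.
import java.util.Properties;

public final class WindowResetExample {
  public static void main(String[] args) {
    OneMeasurementHistogram m = new OneMeasurementHistogram("READ", new Properties());
    m.measure(1000);                    // 1000 us
    System.out.println(m.getSummary()); // [READ AverageLatency(us)=1000]
    System.out.println(m.getSummary()); // "" -- the window was just reset
  }
}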
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementRaw.java | RawDataPoint | timeStamp | class RawDataPoint {
private final long timestamp;
private final int value;
public RawDataPoint(int value) {
this.timestamp = System.currentTimeMillis();
this.value = value;
}
public long timeStamp() {<FILL_FUNCTION_BODY>}
public int value() {
return value;
}
} |
return timestamp;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementRaw.java | RawDataPoint | value | class RawDataPoint {
private final long timestamp;
private final int value;
public RawDataPoint(int value) {
this.timestamp = System.currentTimeMillis();
this.value = value;
}
public long timeStamp() {
return timestamp;
}
public int value() {<FILL_FUNCTION_BODY>}
} |
return value;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementRaw.java | RawDataPointComparator | compare | class RawDataPointComparator implements Comparator<RawDataPoint> {
@Override
public int compare(RawDataPoint p1, RawDataPoint p2) {<FILL_FUNCTION_BODY>}
} |
if (p1.value() < p2.value()) {
return -1;
} else if (p1.value() == p2.value()) {
return 0;
} else {
return 1;
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementRaw.java | RawDataPointComparator | measure | class RawDataPointComparator implements Comparator<RawDataPoint> {
@Override
public int compare(RawDataPoint p1, RawDataPoint p2) {
if (p1.value() < p2.value()) {
return -1;
} else if (p1.value() == p2.value()) {
return 0;
} else {
return 1;
}
}
}
/**
 * Optionally, the user can configure an output file to save the raw data
 * points. By default, raw results are written to stdout.
*/
public static final String OUTPUT_FILE_PATH = "measurement.raw.output_file";
public static final String OUTPUT_FILE_PATH_DEFAULT = "";
/**
 * Optionally, the user can request that summary stats not be output. This is
 * useful if the user chains the raw measurement type behind the HdrHistogram
 * type, which already outputs summary stats. But even in that case, the user
 * may still want this class to compute summary stats, especially for
 * accurate computation of percentiles (percentiles computed by the histogram
 * classes are only approximations).
*/
public static final String NO_SUMMARY_STATS = "measurement.raw.no_summary";
public static final String NO_SUMMARY_STATS_DEFAULT = "false";
private final PrintStream outputStream;
private boolean noSummaryStats = false;
private LinkedList<RawDataPoint> measurements;
private long totalLatency = 0;
// A window of stats to print summary for at the next getSummary() call.
// It's supposed to be a one line summary, so we will just print count and
// average.
private int windowOperations = 0;
private long windowTotalLatency = 0;
public OneMeasurementRaw(String name, Properties props) {
super(name);
String outputFilePath = props.getProperty(OUTPUT_FILE_PATH, OUTPUT_FILE_PATH_DEFAULT);
if (!outputFilePath.isEmpty()) {
System.out.println("Raw data measurement: will output to result file: " +
outputFilePath);
try {
outputStream = new PrintStream(
new FileOutputStream(outputFilePath, true),
true);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open raw data output file", e);
}
} else {
System.out.println("Raw data measurement: will output to stdout.");
outputStream = System.out;
}
noSummaryStats = Boolean.parseBoolean(props.getProperty(NO_SUMMARY_STATS,
NO_SUMMARY_STATS_DEFAULT));
measurements = new LinkedList<>();
}
@Override
public synchronized void measure(int latency) {<FILL_FUNCTION_BODY> |
totalLatency += latency;
windowTotalLatency += latency;
windowOperations++;
measurements.add(new RawDataPoint(latency));
|
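// Config sketch (editor addition, not YCSB source): wiring the two properties
// documented above. The output path is a made-up example; the property keys are
// the OUTPUT_FILE_PATH and NO_SUMMARY_STATS constants. Assumes OneMeasurementRaw
// above is on the classpath.
import java.util.Properties;

public final class RawConfigExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("measurement.raw.output_file", "/tmp/read-latencies.csv");
    props.setProperty("measurement.raw.no_summary", "true");
    OneMeasurementRaw raw = new OneMeasurementRaw("READ", props);
    raw.measure(1200); // appended to the list as a timestamped raw data point
  }
}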
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementRaw.java | RawDataPointComparator | exportMeasurements | class RawDataPointComparator implements Comparator<RawDataPoint> {
@Override
public int compare(RawDataPoint p1, RawDataPoint p2) {
if (p1.value() < p2.value()) {
return -1;
} else if (p1.value() == p2.value()) {
return 0;
} else {
return 1;
}
}
}
/**
 * Optionally, the user can configure an output file to save the raw data
 * points. By default, raw results are written to stdout.
*/
public static final String OUTPUT_FILE_PATH = "measurement.raw.output_file";
public static final String OUTPUT_FILE_PATH_DEFAULT = "";
/**
 * Optionally, the user can request that summary stats not be output. This is
 * useful if the user chains the raw measurement type behind the HdrHistogram
 * type, which already outputs summary stats. But even in that case, the user
 * may still want this class to compute summary stats, especially for
 * accurate computation of percentiles (percentiles computed by the histogram
 * classes are only approximations).
*/
public static final String NO_SUMMARY_STATS = "measurement.raw.no_summary";
public static final String NO_SUMMARY_STATS_DEFAULT = "false";
private final PrintStream outputStream;
private boolean noSummaryStats = false;
private LinkedList<RawDataPoint> measurements;
private long totalLatency = 0;
// A window of stats to print summary for at the next getSummary() call.
// It's supposed to be a one line summary, so we will just print count and
// average.
private int windowOperations = 0;
private long windowTotalLatency = 0;
public OneMeasurementRaw(String name, Properties props) {
super(name);
String outputFilePath = props.getProperty(OUTPUT_FILE_PATH, OUTPUT_FILE_PATH_DEFAULT);
if (!outputFilePath.isEmpty()) {
System.out.println("Raw data measurement: will output to result file: " +
outputFilePath);
try {
outputStream = new PrintStream(
new FileOutputStream(outputFilePath, true),
true);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open raw data output file", e);
}
} else {
System.out.println("Raw data measurement: will output to stdout.");
outputStream = System.out;
}
noSummaryStats = Boolean.parseBoolean(props.getProperty(NO_SUMMARY_STATS,
NO_SUMMARY_STATS_DEFAULT));
measurements = new LinkedList<>();
}
@Override
public synchronized void measure(int latency) {
totalLatency += latency;
windowTotalLatency += latency;
windowOperations++;
measurements.add(new RawDataPoint(latency));
}
@Override
public void exportMeasurements(MeasurementsExporter exporter)
throws IOException {<FILL_FUNCTION_BODY> |
// Output raw data points first then print out a summary of percentiles to
// stdout.
outputStream.println(getName() +
" latency raw data: op, timestamp(ms), latency(us)");
for (RawDataPoint point : measurements) {
outputStream.println(
String.format("%s,%d,%d", getName(), point.timeStamp(),
point.value()));
}
if (outputStream != System.out) {
outputStream.close();
}
int totalOps = measurements.size();
exporter.write(getName(), "Total Operations", totalOps);
if (totalOps > 0 && !noSummaryStats) {
exporter.write(getName(),
"Below is a summary of latency in microseconds:", -1);
exporter.write(getName(), "Average",
(double) totalLatency / (double) totalOps);
Collections.sort(measurements, new RawDataPointComparator());
exporter.write(getName(), "Min", measurements.get(0).value());
exporter.write(
getName(), "Max", measurements.get(totalOps - 1).value());
exporter.write(
getName(), "p1", measurements.get((int) (totalOps * 0.01)).value());
exporter.write(
getName(), "p5", measurements.get((int) (totalOps * 0.05)).value());
exporter.write(
getName(), "p50", measurements.get((int) (totalOps * 0.5)).value());
exporter.write(
getName(), "p90", measurements.get((int) (totalOps * 0.9)).value());
exporter.write(
getName(), "p95", measurements.get((int) (totalOps * 0.95)).value());
exporter.write(
getName(), "p99", measurements.get((int) (totalOps * 0.99)).value());
exporter.write(getName(), "p99.9",
measurements.get((int) (totalOps * 0.999)).value());
exporter.write(getName(), "p99.99",
measurements.get((int) (totalOps * 0.9999)).value());
}
exportStatusCounts(exporter);
|
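// Worked index arithmetic (editor addition, not YCSB source): the percentiles
// above come from truncated indexes into the sorted measurement list.
// totalOps = 1000 is an invented sample size.
public final class RawPercentileIndexExample {
  public static void main(String[] args) {
    int totalOps = 1000;
    System.out.println((int) (totalOps * 0.95));   // 950 -> the p95 entry
    System.out.println((int) (totalOps * 0.999));  // 999 -> the p99.9 entry
    System.out.println((int) (totalOps * 0.9999)); // 999 -> the p99.99 entry
    // Note: for small sample counts the highest percentiles collapse onto the
    // same index, as p99.9 and p99.99 do here.
  }
}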
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementRaw.java | RawDataPointComparator | getSummary | class RawDataPointComparator implements Comparator<RawDataPoint> {
@Override
public int compare(RawDataPoint p1, RawDataPoint p2) {
if (p1.value() < p2.value()) {
return -1;
} else if (p1.value() == p2.value()) {
return 0;
} else {
return 1;
}
}
}
/**
 * Optionally, the user can configure an output file to save the raw data
 * points. By default, raw results are written to stdout.
*/
public static final String OUTPUT_FILE_PATH = "measurement.raw.output_file";
public static final String OUTPUT_FILE_PATH_DEFAULT = "";
/**
 * Optionally, the user can request that summary stats not be output. This is
 * useful if the user chains the raw measurement type behind the HdrHistogram
 * type, which already outputs summary stats. But even in that case, the user
 * may still want this class to compute summary stats, especially for
 * accurate computation of percentiles (percentiles computed by the histogram
 * classes are only approximations).
*/
public static final String NO_SUMMARY_STATS = "measurement.raw.no_summary";
public static final String NO_SUMMARY_STATS_DEFAULT = "false";
private final PrintStream outputStream;
private boolean noSummaryStats = false;
private LinkedList<RawDataPoint> measurements;
private long totalLatency = 0;
// A window of stats to print summary for at the next getSummary() call.
// It's supposed to be a one line summary, so we will just print count and
// average.
private int windowOperations = 0;
private long windowTotalLatency = 0;
public OneMeasurementRaw(String name, Properties props) {
super(name);
String outputFilePath = props.getProperty(OUTPUT_FILE_PATH, OUTPUT_FILE_PATH_DEFAULT);
if (!outputFilePath.isEmpty()) {
System.out.println("Raw data measurement: will output to result file: " +
outputFilePath);
try {
outputStream = new PrintStream(
new FileOutputStream(outputFilePath, true),
true);
} catch (FileNotFoundException e) {
throw new RuntimeException("Failed to open raw data output file", e);
}
} else {
System.out.println("Raw data measurement: will output to stdout.");
outputStream = System.out;
}
noSummaryStats = Boolean.parseBoolean(props.getProperty(NO_SUMMARY_STATS,
NO_SUMMARY_STATS_DEFAULT));
measurements = new LinkedList<>();
}
@Override
public synchronized void measure(int latency) {
totalLatency += latency;
windowTotalLatency += latency;
windowOperations++;
measurements.add(new RawDataPoint(latency));
}
@Override
public void exportMeasurements(MeasurementsExporter exporter)
throws IOException {
// Output raw data points first then print out a summary of percentiles to
// stdout.
outputStream.println(getName() +
" latency raw data: op, timestamp(ms), latency(us)");
for (RawDataPoint point : measurements) {
outputStream.println(
String.format("%s,%d,%d", getName(), point.timeStamp(),
point.value()));
}
if (outputStream != System.out) {
outputStream.close();
}
int totalOps = measurements.size();
exporter.write(getName(), "Total Operations", totalOps);
if (totalOps > 0 && !noSummaryStats) {
exporter.write(getName(),
"Below is a summary of latency in microseconds:", -1);
exporter.write(getName(), "Average",
(double) totalLatency / (double) totalOps);
Collections.sort(measurements, new RawDataPointComparator());
exporter.write(getName(), "Min", measurements.get(0).value());
exporter.write(
getName(), "Max", measurements.get(totalOps - 1).value());
exporter.write(
getName(), "p1", measurements.get((int) (totalOps * 0.01)).value());
exporter.write(
getName(), "p5", measurements.get((int) (totalOps * 0.05)).value());
exporter.write(
getName(), "p50", measurements.get((int) (totalOps * 0.5)).value());
exporter.write(
getName(), "p90", measurements.get((int) (totalOps * 0.9)).value());
exporter.write(
getName(), "p95", measurements.get((int) (totalOps * 0.95)).value());
exporter.write(
getName(), "p99", measurements.get((int) (totalOps * 0.99)).value());
exporter.write(getName(), "p99.9",
measurements.get((int) (totalOps * 0.999)).value());
exporter.write(getName(), "p99.99",
measurements.get((int) (totalOps * 0.9999)).value());
}
exportStatusCounts(exporter);
}
@Override
public synchronized String getSummary() {<FILL_FUNCTION_BODY> |
if (windowOperations == 0) {
return "";
}
String toReturn = String.format("%s count: %d, average latency(us): %.2f",
getName(), windowOperations,
(double) windowTotalLatency / (double) windowOperations);
windowTotalLatency = 0;
windowOperations = 0;
return toReturn;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java | OneMeasurementTimeSeries | checkEndOfUnit | class OneMeasurementTimeSeries extends OneMeasurement {
/**
* Granularity for time series; measurements will be averaged in chunks of this granularity. Units are milliseconds.
*/
public static final String GRANULARITY = "timeseries.granularity";
public static final String GRANULARITY_DEFAULT = "1000";
private final int granularity;
private final Vector<SeriesUnit> measurements;
private long start = -1;
private long currentunit = -1;
private long count = 0;
private long sum = 0;
private long operations = 0;
private long totallatency = 0;
//keep a windowed version of these stats for printing status
private int windowoperations = 0;
private long windowtotallatency = 0;
private int min = -1;
private int max = -1;
public OneMeasurementTimeSeries(String name, Properties props) {
super(name);
granularity = Integer.parseInt(props.getProperty(GRANULARITY, GRANULARITY_DEFAULT));
measurements = new Vector<>();
}
private synchronized void checkEndOfUnit(boolean forceend) {<FILL_FUNCTION_BODY>}
@Override
public void measure(int latency) {
checkEndOfUnit(false);
count++;
sum += latency;
totallatency += latency;
operations++;
windowoperations++;
windowtotallatency += latency;
if (latency > max) {
max = latency;
}
if ((latency < min) || (min < 0)) {
min = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
checkEndOfUnit(true);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", (((double) totallatency) / ((double) operations)));
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
// TODO: 95th and 99th percentile latency
exportStatusCounts(exporter);
for (SeriesUnit unit : measurements) {
exporter.write(getName(), Long.toString(unit.time), unit.average);
}
}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
} |
long now = System.currentTimeMillis();
if (start < 0) {
currentunit = 0;
start = now;
}
long unit = ((now - start) / granularity) * granularity;
if ((unit > currentunit) || (forceend)) {
double avg = ((double) sum) / ((double) count);
measurements.add(new SeriesUnit(currentunit, avg));
currentunit = unit;
count = 0;
sum = 0;
}
|
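// Worked example (editor addition, not YCSB source): checkEndOfUnit snaps elapsed
// time to the granularity grid before comparing against currentunit. The elapsed
// time below is invented. (If forceend fires before any measurement was recorded,
// sum/count is 0.0/0.0 and the stored average is NaN.)
public final class GranularityGridExample {
  public static void main(String[] args) {
    int granularity = 1000;  // timeseries.granularity default, in ms
    long elapsed = 2350;     // now - start, in ms
    long unit = (elapsed / granularity) * granularity;
    System.out.println(unit); // 2000 -> exceeds currentunit, so a SeriesUnit is emitted
  }
}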
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java | OneMeasurementTimeSeries | measure | class OneMeasurementTimeSeries extends OneMeasurement {
/**
* Granularity for time series; measurements will be averaged in chunks of this granularity. Units are milliseconds.
*/
public static final String GRANULARITY = "timeseries.granularity";
public static final String GRANULARITY_DEFAULT = "1000";
private final int granularity;
private final Vector<SeriesUnit> measurements;
private long start = -1;
private long currentunit = -1;
private long count = 0;
private long sum = 0;
private long operations = 0;
private long totallatency = 0;
//keep a windowed version of these stats for printing status
private int windowoperations = 0;
private long windowtotallatency = 0;
private int min = -1;
private int max = -1;
public OneMeasurementTimeSeries(String name, Properties props) {
super(name);
granularity = Integer.parseInt(props.getProperty(GRANULARITY, GRANULARITY_DEFAULT));
measurements = new Vector<>();
}
private synchronized void checkEndOfUnit(boolean forceend) {
long now = System.currentTimeMillis();
if (start < 0) {
currentunit = 0;
start = now;
}
long unit = ((now - start) / granularity) * granularity;
if ((unit > currentunit) || (forceend)) {
double avg = ((double) sum) / ((double) count);
measurements.add(new SeriesUnit(currentunit, avg));
currentunit = unit;
count = 0;
sum = 0;
}
}
@Override
public void measure(int latency) {<FILL_FUNCTION_BODY>}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
checkEndOfUnit(true);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", (((double) totallatency) / ((double) operations)));
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
// TODO: 95th and 99th percentile latency
exportStatusCounts(exporter);
for (SeriesUnit unit : measurements) {
exporter.write(getName(), Long.toString(unit.time), unit.average);
}
}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
} |
checkEndOfUnit(false);
count++;
sum += latency;
totallatency += latency;
operations++;
windowoperations++;
windowtotallatency += latency;
if (latency > max) {
max = latency;
}
if ((latency < min) || (min < 0)) {
min = latency;
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java | OneMeasurementTimeSeries | exportMeasurements | class OneMeasurementTimeSeries extends OneMeasurement {
/**
* Granularity for time series; measurements will be averaged in chunks of this granularity. Units are milliseconds.
*/
public static final String GRANULARITY = "timeseries.granularity";
public static final String GRANULARITY_DEFAULT = "1000";
private final int granularity;
private final Vector<SeriesUnit> measurements;
private long start = -1;
private long currentunit = -1;
private long count = 0;
private long sum = 0;
private long operations = 0;
private long totallatency = 0;
//keep a windowed version of these stats for printing status
private int windowoperations = 0;
private long windowtotallatency = 0;
private int min = -1;
private int max = -1;
public OneMeasurementTimeSeries(String name, Properties props) {
super(name);
granularity = Integer.parseInt(props.getProperty(GRANULARITY, GRANULARITY_DEFAULT));
measurements = new Vector<>();
}
private synchronized void checkEndOfUnit(boolean forceend) {
long now = System.currentTimeMillis();
if (start < 0) {
currentunit = 0;
start = now;
}
long unit = ((now - start) / granularity) * granularity;
if ((unit > currentunit) || (forceend)) {
double avg = ((double) sum) / ((double) count);
measurements.add(new SeriesUnit(currentunit, avg));
currentunit = unit;
count = 0;
sum = 0;
}
}
@Override
public void measure(int latency) {
checkEndOfUnit(false);
count++;
sum += latency;
totallatency += latency;
operations++;
windowoperations++;
windowtotallatency += latency;
if (latency > max) {
max = latency;
}
if ((latency < min) || (min < 0)) {
min = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {<FILL_FUNCTION_BODY>}
@Override
public String getSummary() {
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
}
} |
checkEndOfUnit(true);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", (((double) totallatency) / ((double) operations)));
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
// TODO: 95th and 99th percentile latency
exportStatusCounts(exporter);
for (SeriesUnit unit : measurements) {
exporter.write(getName(), Long.toString(unit.time), unit.average);
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/OneMeasurementTimeSeries.java | OneMeasurementTimeSeries | getSummary | class OneMeasurementTimeSeries extends OneMeasurement {
/**
* Granularity for time series; measurements will be averaged in chunks of this granularity. Units are milliseconds.
*/
public static final String GRANULARITY = "timeseries.granularity";
public static final String GRANULARITY_DEFAULT = "1000";
private final int granularity;
private final Vector<SeriesUnit> measurements;
private long start = -1;
private long currentunit = -1;
private long count = 0;
private long sum = 0;
private long operations = 0;
private long totallatency = 0;
//keep a windowed version of these stats for printing status
private int windowoperations = 0;
private long windowtotallatency = 0;
private int min = -1;
private int max = -1;
public OneMeasurementTimeSeries(String name, Properties props) {
super(name);
granularity = Integer.parseInt(props.getProperty(GRANULARITY, GRANULARITY_DEFAULT));
measurements = new Vector<>();
}
private synchronized void checkEndOfUnit(boolean forceend) {
long now = System.currentTimeMillis();
if (start < 0) {
currentunit = 0;
start = now;
}
long unit = ((now - start) / granularity) * granularity;
if ((unit > currentunit) || (forceend)) {
double avg = ((double) sum) / ((double) count);
measurements.add(new SeriesUnit(currentunit, avg));
currentunit = unit;
count = 0;
sum = 0;
}
}
@Override
public void measure(int latency) {
checkEndOfUnit(false);
count++;
sum += latency;
totallatency += latency;
operations++;
windowoperations++;
windowtotallatency += latency;
if (latency > max) {
max = latency;
}
if ((latency < min) || (min < 0)) {
min = latency;
}
}
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
checkEndOfUnit(true);
exporter.write(getName(), "Operations", operations);
exporter.write(getName(), "AverageLatency(us)", (((double) totallatency) / ((double) operations)));
exporter.write(getName(), "MinLatency(us)", min);
exporter.write(getName(), "MaxLatency(us)", max);
// TODO: 95th and 99th percentile latency
exportStatusCounts(exporter);
for (SeriesUnit unit : measurements) {
exporter.write(getName(), Long.toString(unit.time), unit.average);
}
}
@Override
public String getSummary() {<FILL_FUNCTION_BODY>}
} |
if (windowoperations == 0) {
return "";
}
DecimalFormat d = new DecimalFormat("#.##");
double report = ((double) windowtotallatency) / ((double) windowoperations);
windowtotallatency = 0;
windowoperations = 0;
return "[" + getName() + " AverageLatency(us)=" + d.format(report) + "]";
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/TwoInOneMeasurement.java | TwoInOneMeasurement | reportStatus | class TwoInOneMeasurement extends OneMeasurement {
private final OneMeasurement thing1, thing2;
public TwoInOneMeasurement(String name, OneMeasurement thing1, OneMeasurement thing2) {
super(name);
this.thing1 = thing1;
this.thing2 = thing2;
}
/**
 * No need for synchronization; the underlying ConcurrentHashMap (CHM) handles concurrent updates.
*/
@Override
public void reportStatus(final Status status) {<FILL_FUNCTION_BODY>}
/**
 * Latency is reported in microseconds.
* Using {@link org.HdrHistogram.Recorder} to support concurrent updates to histogram.
*/
@Override
public void measure(int latencyInMicros) {
thing1.measure(latencyInMicros);
thing2.measure(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
thing1.exportMeasurements(exporter);
thing2.exportMeasurements(exporter);
}
/**
* This is called periodically from the StatusThread. There's a single StatusThread per Client process.
 * We optionally serialize the interval histogram to the log at this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
return thing1.getSummary() + "\n" + thing2.getSummary();
}
} |
thing1.reportStatus(status);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/TwoInOneMeasurement.java | TwoInOneMeasurement | measure | class TwoInOneMeasurement extends OneMeasurement {
private final OneMeasurement thing1, thing2;
public TwoInOneMeasurement(String name, OneMeasurement thing1, OneMeasurement thing2) {
super(name);
this.thing1 = thing1;
this.thing2 = thing2;
}
/**
 * No need for synchronization; the underlying ConcurrentHashMap (CHM) handles concurrent updates.
*/
@Override
public void reportStatus(final Status status) {
thing1.reportStatus(status);
}
/**
 * Latency is reported in microseconds.
* Using {@link org.HdrHistogram.Recorder} to support concurrent updates to histogram.
*/
@Override
public void measure(int latencyInMicros) {<FILL_FUNCTION_BODY>}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
thing1.exportMeasurements(exporter);
thing2.exportMeasurements(exporter);
}
/**
* This is called periodically from the StatusThread. There's a single StatusThread per Client process.
 * We optionally serialize the interval histogram to the log at this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
return thing1.getSummary() + "\n" + thing2.getSummary();
}
} |
thing1.measure(latencyInMicros);
thing2.measure(latencyInMicros);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/TwoInOneMeasurement.java | TwoInOneMeasurement | exportMeasurements | class TwoInOneMeasurement extends OneMeasurement {
private final OneMeasurement thing1, thing2;
public TwoInOneMeasurement(String name, OneMeasurement thing1, OneMeasurement thing2) {
super(name);
this.thing1 = thing1;
this.thing2 = thing2;
}
/**
 * No need for synchronization; the underlying ConcurrentHashMap (CHM) handles concurrent updates.
*/
@Override
public void reportStatus(final Status status) {
thing1.reportStatus(status);
}
/**
 * Latency is reported in microseconds.
* Using {@link org.HdrHistogram.Recorder} to support concurrent updates to histogram.
*/
@Override
public void measure(int latencyInMicros) {
thing1.measure(latencyInMicros);
thing2.measure(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {<FILL_FUNCTION_BODY>}
/**
* This is called periodically from the StatusThread. There's a single StatusThread per Client process.
 * We optionally serialize the interval histogram to the log at this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {
return thing1.getSummary() + "\n" + thing2.getSummary();
}
} |
thing1.exportMeasurements(exporter);
thing2.exportMeasurements(exporter);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/TwoInOneMeasurement.java | TwoInOneMeasurement | getSummary | class TwoInOneMeasurement extends OneMeasurement {
private final OneMeasurement thing1, thing2;
public TwoInOneMeasurement(String name, OneMeasurement thing1, OneMeasurement thing2) {
super(name);
this.thing1 = thing1;
this.thing2 = thing2;
}
/**
 * No need for synchronization; the underlying ConcurrentHashMap (CHM) handles concurrent updates.
*/
@Override
public void reportStatus(final Status status) {
thing1.reportStatus(status);
}
/**
 * Latency is reported in microseconds.
* Using {@link org.HdrHistogram.Recorder} to support concurrent updates to histogram.
*/
@Override
public void measure(int latencyInMicros) {
thing1.measure(latencyInMicros);
thing2.measure(latencyInMicros);
}
/**
* This is called from a main thread, on orderly termination.
*/
@Override
public void exportMeasurements(MeasurementsExporter exporter) throws IOException {
thing1.exportMeasurements(exporter);
thing2.exportMeasurements(exporter);
}
/**
* This is called periodically from the StatusThread. There's a single StatusThread per Client process.
 * We optionally serialize the interval histogram to the log at this opportunity.
*
* @see site.ycsb.measurements.OneMeasurement#getSummary()
*/
@Override
public String getSummary() {<FILL_FUNCTION_BODY>}
} |
return thing1.getSummary() + "\n" + thing2.getSummary();
|
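// Usage sketch (editor addition, not YCSB source): TwoInOneMeasurement fans every
// call out to two delegates, e.g. an HdrHistogram measurement plus a raw one.
// Assumes the site.ycsb.measurements classes above (and the HdrHistogram library)
// are on the classpath; the empty Properties simply take defaults.
import java.util.Properties;

public final class TwoInOneExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    OneMeasurement combined = new TwoInOneMeasurement("READ",
        new OneMeasurementHdrHistogram("READ", props),
        new OneMeasurementRaw("READ", props));
    combined.measure(1200);                    // recorded by both delegates
    System.out.println(combined.getSummary()); // both summaries, newline-separated
  }
}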
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java | JSONArrayMeasurementsExporter | write | class JSONArrayMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONArrayMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
g.writeStartArray();
}
public void write(String metric, String measurement, int i) throws IOException {<FILL_FUNCTION_BODY>}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.writeEndArray();
g.close();
}
}
} |
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
|
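// Output sketch (editor addition, not YCSB source): a short driver for the
// exporter above. With the DefaultPrettyPrinter configured in the constructor,
// the writes below emit one pretty-printed JSON array of
// {"metric", "measurement", "value"} objects on stdout.
import java.io.IOException;

public final class JsonArrayExporterExample {
  public static void main(String[] args) throws IOException {
    JSONArrayMeasurementsExporter e = new JSONArrayMeasurementsExporter(System.out);
    e.write("READ", "Operations", 1000L);        // long overload
    e.write("READ", "AverageLatency(us)", 312.5); // double overload
    e.close(); // writes the closing ']' and flushes the generator
  }
}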
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java | JSONArrayMeasurementsExporter | write | class JSONArrayMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONArrayMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
g.writeStartArray();
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {<FILL_FUNCTION_BODY>}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.writeEndArray();
g.close();
}
}
} |
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java | JSONArrayMeasurementsExporter | write | class JSONArrayMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONArrayMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
g.writeStartArray();
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {<FILL_FUNCTION_BODY>}
public void close() throws IOException {
if (g != null) {
g.writeEndArray();
g.close();
}
}
} |
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java | JSONArrayMeasurementsExporter | close | class JSONArrayMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONArrayMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
g.writeStartArray();
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {<FILL_FUNCTION_BODY>}
} |
if (g != null) {
g.writeEndArray();
g.close();
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONMeasurementsExporter.java | JSONMeasurementsExporter | write | class JSONMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
}
public void write(String metric, String measurement, int i) throws IOException {<FILL_FUNCTION_BODY>}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.close();
}
}
} |
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
|
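// Note (editor addition): unlike JSONArrayMeasurementsExporter above, this
// variant emits a bare sequence of JSON objects -- the constructor opens no
// array and close() writes no closing bracket.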
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONMeasurementsExporter.java | JSONMeasurementsExporter | write | class JSONMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {<FILL_FUNCTION_BODY>}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {
if (g != null) {
g.close();
}
}
} |
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONMeasurementsExporter.java | JSONMeasurementsExporter | write | class JSONMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {<FILL_FUNCTION_BODY>}
public void close() throws IOException {
if (g != null) {
g.close();
}
}
} |
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/JSONMeasurementsExporter.java | JSONMeasurementsExporter | close | class JSONMeasurementsExporter implements MeasurementsExporter {
private final JsonFactory factory = new JsonFactory();
private JsonGenerator g;
public JSONMeasurementsExporter(OutputStream os) throws IOException {
BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(os));
g = factory.createJsonGenerator(bw);
g.setPrettyPrinter(new DefaultPrettyPrinter());
}
public void write(String metric, String measurement, int i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, long i) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", i);
g.writeEndObject();
}
public void write(String metric, String measurement, double d) throws IOException {
g.writeStartObject();
g.writeStringField("metric", metric);
g.writeStringField("measurement", measurement);
g.writeNumberField("value", d);
g.writeEndObject();
}
public void close() throws IOException {<FILL_FUNCTION_BODY>}
} |
if (g != null) {
g.close();
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/MeasurementsExporter.java | null | null | null | null |
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/TextMeasurementsExporter.java | TextMeasurementsExporter | write | class TextMeasurementsExporter implements MeasurementsExporter {
private final BufferedWriter bw;
public TextMeasurementsExporter(OutputStream os) {
this.bw = new BufferedWriter(new OutputStreamWriter(os));
}
public void write(String metric, String measurement, int i) throws IOException {<FILL_FUNCTION_BODY>}
public void write(String metric, String measurement, long i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, double d) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + d);
bw.newLine();
}
public void close() throws IOException {
this.bw.close();
}
} |
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/TextMeasurementsExporter.java | TextMeasurementsExporter | write | class TextMeasurementsExporter implements MeasurementsExporter {
private final BufferedWriter bw;
public TextMeasurementsExporter(OutputStream os) {
this.bw = new BufferedWriter(new OutputStreamWriter(os));
}
public void write(String metric, String measurement, int i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, long i) throws IOException {<FILL_FUNCTION_BODY>}
public void write(String metric, String measurement, double d) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + d);
bw.newLine();
}
public void close() throws IOException {
this.bw.close();
}
} |
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/TextMeasurementsExporter.java | TextMeasurementsExporter | write | class TextMeasurementsExporter implements MeasurementsExporter {
private final BufferedWriter bw;
public TextMeasurementsExporter(OutputStream os) {
this.bw = new BufferedWriter(new OutputStreamWriter(os));
}
public void write(String metric, String measurement, int i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, long i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, double d) throws IOException {<FILL_FUNCTION_BODY>}
public void close() throws IOException {
this.bw.close();
}
} |
bw.write("[" + metric + "], " + measurement + ", " + d);
bw.newLine();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/measurements/exporter/TextMeasurementsExporter.java | TextMeasurementsExporter | close | class TextMeasurementsExporter implements MeasurementsExporter {
private final BufferedWriter bw;
public TextMeasurementsExporter(OutputStream os) {
this.bw = new BufferedWriter(new OutputStreamWriter(os));
}
public void write(String metric, String measurement, int i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, long i) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + i);
bw.newLine();
}
public void write(String metric, String measurement, double d) throws IOException {
bw.write("[" + metric + "], " + measurement + ", " + d);
bw.newLine();
}
public void close() throws IOException {<FILL_FUNCTION_BODY>}
} |
this.bw.close();
|
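For contrast with the JSON exporter, a short sketch of the text exporter's one-line-per-measurement format; the sample values are invented.
import java.io.IOException;
public final class TextExporterDemo {
  public static void main(String[] args) throws IOException {
    // Each write() emits a line of the form: [METRIC], measurement, value
    TextMeasurementsExporter text = new TextMeasurementsExporter(System.out);
    text.write("UPDATE", "Operations", 1000);            // prints: [UPDATE], Operations, 1000
    text.write("UPDATE", "AverageLatency(us)", 450.25);  // prints: [UPDATE], AverageLatency(us), 450.25
    text.close(); // closes the wrapped writer (and System.out), so call it last
  }
}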
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/ConstantOccupancyWorkload.java | ConstantOccupancyWorkload | init | class ConstantOccupancyWorkload extends CoreWorkload {
private long disksize;
private long storageages;
private double occupancy;
private long objectCount;
public static final String STORAGE_AGE_PROPERTY = "storageages";
public static final long STORAGE_AGE_PROPERTY_DEFAULT = 10;
public static final String DISK_SIZE_PROPERTY = "disksize";
public static final long DISK_SIZE_PROPERTY_DEFAULT = 100 * 1000 * 1000;
public static final String OCCUPANCY_PROPERTY = "occupancy";
public static final double OCCUPANCY_PROPERTY_DEFAULT = 0.9;
@Override
public void init(Properties p) throws WorkloadException {<FILL_FUNCTION_BODY>}
} |
disksize = Long.parseLong(p.getProperty(DISK_SIZE_PROPERTY, String.valueOf(DISK_SIZE_PROPERTY_DEFAULT)));
storageages = Long.parseLong(p.getProperty(STORAGE_AGE_PROPERTY, String.valueOf(STORAGE_AGE_PROPERTY_DEFAULT)));
occupancy = Double.parseDouble(p.getProperty(OCCUPANCY_PROPERTY, String.valueOf(OCCUPANCY_PROPERTY_DEFAULT)));
if (p.getProperty(Client.RECORD_COUNT_PROPERTY) != null ||
p.getProperty(Client.INSERT_COUNT_PROPERTY) != null ||
p.getProperty(Client.OPERATION_COUNT_PROPERTY) != null) {
System.err.println("Warning: record, insert or operation count was set prior to initting " +
"ConstantOccupancyWorkload. Overriding old values.");
}
NumberGenerator g = CoreWorkload.getFieldLengthGenerator(p);
double fieldsize = g.mean();
int fieldcount = Integer.parseInt(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
objectCount = (long) (occupancy * (disksize / (fieldsize * fieldcount)));
if (objectCount == 0) {
throw new IllegalStateException("Object count was zero. Perhaps disksize is too low?");
}
p.setProperty(Client.RECORD_COUNT_PROPERTY, String.valueOf(objectCount));
p.setProperty(Client.OPERATION_COUNT_PROPERTY, String.valueOf(storageages * objectCount));
p.setProperty(Client.INSERT_COUNT_PROPERTY, String.valueOf(objectCount));
super.init(p);
|
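A worked example of the sizing arithmetic in init() above, plugging in the class defaults; the printed figures are derived from those defaults, not read from the dataset.
public final class OccupancyMath {
  public static void main(String[] args) {
    long disksize = 100_000_000L; // DISK_SIZE_PROPERTY_DEFAULT
    double occupancy = 0.9;       // OCCUPANCY_PROPERTY_DEFAULT
    double fieldsize = 100.0;     // mean of the default constant field length generator
    int fieldcount = 10;          // CoreWorkload FIELD_COUNT_PROPERTY_DEFAULT
    long storageages = 10;        // STORAGE_AGE_PROPERTY_DEFAULT
    long objectCount = (long) (occupancy * (disksize / (fieldsize * fieldcount)));
    System.out.println(objectCount);                // 90000 records
    System.out.println(storageages * objectCount);  // 900000 operations
  }
}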
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | buildKeyName | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
   * The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
   * The name of the property for the distribution of requests across the keyspace. Options are
   * "uniform", "sequential", "zipfian", "latest", "exponential" and "hotspot".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
   * The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
   * The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
   * Percentage of data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
   * Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {<FILL_FUNCTION_BODY>}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
    long insertcount =
        Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
      // The scrambled zipfian generator picks a random "next key" in part by taking the modulus
      // over the number of keys. If the number of keys changes, this would shift the modulus, and
      // we don't want that to change which keys are popular. So we construct the generator with a
      // keyspace that is larger than exists at the beginning of the test: we predict the number of
      // inserts, and tell the generator the number of existing keys plus the number of predicted
      // keys as the total keyspace. Then, if the generator picks a key that hasn't been inserted
      // yet, we simply ignore it and pick another key. This way the size of the keyspace never
      // changes from the perspective of the scrambled zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random number between [0.8, 1.2)*insertionRetryInterval.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
   * Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
|
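A small sketch of what buildKeyName above produces, assuming the standard site.ycsb.workloads package layout; the key numbers are illustrative.
public final class KeyNameDemo {
  public static void main(String[] args) {
    // orderedinserts = true keeps the raw counter; zeropadding pads the
    // numeric part to the given width with leading zeros.
    System.out.println(CoreWorkload.buildKeyName(42, 8, true));   // user00000042
    // zeropadding = 1 (the default) adds no padding to multi-digit keys.
    System.out.println(CoreWorkload.buildKeyName(42, 1, true));   // user42
    // orderedinserts = false scrambles the counter through Utils.hash()
    // first, so consecutive counters map to scattered key names.
    System.out.println(CoreWorkload.buildKeyName(42, 1, false));  // user<hash(42)>
  }
}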
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | getFieldLengthGenerator | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
   * The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
   * The name of the property for the distribution of requests across the keyspace. Options are
   * "uniform", "sequential", "zipfian", "latest", "exponential" and "hotspot".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
   * The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
   * The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
   * Percentage of data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
   * Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {<FILL_FUNCTION_BODY>}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
    long insertcount =
        Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
      // The scrambled zipfian generator picks a random "next key" in part by taking the modulus
      // over the number of keys. If the number of keys changes, this would shift the modulus, and
      // we don't want that to change which keys are popular. So we construct the generator with a
      // keyspace that is larger than exists at the beginning of the test: we predict the number of
      // inserts, and tell the generator the number of existing keys plus the number of predicted
      // keys as the total keyspace. Then, if the generator picks a key that hasn't been inserted
      // yet, we simply ignore it and pick another key. This way the size of the keyspace never
      // changes from the perspective of the scrambled zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random number between [0.8, 1.2)*insertionRetryInterval.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
   * Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
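// Minimal usage sketch (editor-added; hypothetical helper, not part of upstream YCSB).
// With the default proportions this prints mostly "READ" and occasionally "UPDATE".
private static void demoOperationChooser(final Properties p) {
final DiscreteGenerator chooser = createOperationGenerator(p);
for (int i = 0; i < 10; i++) {
System.out.println(chooser.nextString());
}
}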
} |
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | init | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the template used to generate it, to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true to check the correctness of reads. Must also be set to true
* during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "sequential", "exponential", "zipfian", "latest" and "hotspot".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value. Matches integer sort order
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
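// Illustrative examples (editor-added): with ordered inserts, buildKeyName(42, 1, true)
// returns "user42", and buildKeyName(42, 10, true) returns "user0000000042", so the
// zero padding makes lexicographic key order match numeric order. With hashed inserts
// the key number is first scrambled by Utils.hash, spreading records across the keyspace.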
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
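// Editor-added sketch (hypothetical helper, not in upstream YCSB): builds a uniform field
// length generator yielding lengths in [10, 100] via the same properties read above.
private static NumberGenerator demoUniformFieldLengths() throws WorkloadException {
Properties p = new Properties();
p.setProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY, "uniform");
p.setProperty(MIN_FIELD_LENGTH_PROPERTY, "10");
p.setProperty(FIELD_LENGTH_PROPERTY, "100");
return getFieldLengthGenerator(p);
}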
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {<FILL_FUNCTION_BODY>}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
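// Editor-added note: for a fixed (key, field) pair and a constant field length, the value
// built above is fully deterministic; buildDeterministicValue("user42", "field0") always
// yields the same string, which is what lets verifyRow() recompute and compare it.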
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random interval in [0.8, 1.2) * insertionRetryInterval seconds.
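// For example (editor-added): with insertionRetryInterval = 3, sleepTime falls in
// [2400, 3600) milliseconds.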
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
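/**
* Editor-added note (behavior inferred from the code): resamples until the chosen key is
* no greater than the highest acknowledged insert, so transactions never target records
* whose insertion has not completed yet.
*/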
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
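/**
* Editor-added note: the key is acknowledged in a finally block so the acknowledged window
* keeps advancing even if the insert throws; nextKeynum() samples only keys at or below
* the last acknowledged value.
*/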
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
* Creates a weighted discrete generator of the database operations a workload will perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator picks a random "next key" in part by taking the modulus
// over the number of keys. If the number of keys changed mid-run, that would shift the
// modulus and change which keys are popular, so we construct the generator with a keyspace
// that is larger than the one that exists at the beginning of the test: we predict the
// number of inserts and tell the generator that the total keyspace is the number of
// existing keys plus the number of predicted new keys. If the generator then picks a key
// that has not been inserted yet, nextKeynum() simply ignores it and picks another key.
// This way, the size of the keyspace never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
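// Worked example (editor-added): with operationcount = 10000 and insertproportion = 0.05,
// expectednewkeys = 10000 * 0.05 * 2 = 1000, so the generator is told the keyspace holds
// insertcount + 1000 keys even though only insertcount exist when the run starts.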
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
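// Editor-added example: with the defaults (hotspotdatafraction = 0.2,
// hotspotopnfraction = 0.8), 80% of operations target the hottest 20% of the key range.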
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | buildSingleValue | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the template used to generate it, to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true to check the correctness of reads. Must also be set to true
* during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "sequential", "exponential", "zipfian", "latest" and "hotspot".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value. Matches integer sort order
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator picks a random "next key" in part by taking the modulus
// over the number of keys. If the number of keys changed mid-run, that would shift the
// modulus and change which keys are popular, so we construct the generator with a keyspace
// that is larger than the one that exists at the beginning of the test: we predict the
// number of inserts and tell the generator that the total keyspace is the number of
// existing keys plus the number of predicted new keys. If the generator then picks a key
// that has not been inserted yet, nextKeynum() simply ignores it and picks another key.
// This way, the size of the keyspace never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {<FILL_FUNCTION_BODY>}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random interval in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
* Creates a weighted discrete generator of the database operations a workload will perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | buildValues | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if you want to check the correctness of reads. Must also
* be set to true during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-writes.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "zipfian", "latest", "sequential", "hotspot", and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage of data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
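// Hedged usage sketch (not part of the original class): how the key naming
// above behaves for a couple of assumed inputs.
private static void demoBuildKeyName() {
// With zeropadding=4 and ordered inserts, key 42 becomes "user0042".
String ordered = buildKeyName(42, 4, true);
// With hashed inserts (the default), the numeric part is Utils.hash(42).
String hashed = buildKeyName(42, 4, false);
System.out.println(ordered + " / " + hashed);
}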
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
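// Hedged sketch (assumed property values): selecting a uniform field length
// between 1 and 100 bytes through the parsing logic above.
private static NumberGenerator demoFieldLengthGenerator() throws WorkloadException {
Properties p = new Properties();
p.setProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY, "uniform");
p.setProperty(MIN_FIELD_LENGTH_PROPERTY, "1");
p.setProperty(FIELD_LENGTH_PROPERTY, "100");
return getFieldLengthGenerator(p); // yields a UniformLongGenerator(1, 100)
}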
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator computes a random "next key" in part by
// taking the modulus over the number of keys.
// If the number of keys changes, this would shift the modulus, and we don't want that to
// change which keys are popular. So we actually construct the scrambled zipfian generator
// with a keyspace that is larger than what exists at the beginning of the test: we predict
// the number of inserts, and tell the scrambled zipfian generator the number of existing keys
// plus the number of predicted keys as the total keyspace. Then, if the generator picks a key
// that hasn't been inserted yet, we just ignore it and pick another key. This way, the size of
// the keyspace doesn't change from the perspective of the scrambled zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
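// Hedged worked example of the zipfian keyspace padding in init() above
// (numbers assumed, not from the source): with operationcount=10000 and
// insertproportion=0.05, expectednewkeys = (int) (10000 * 0.05 * 2.0) = 1000,
// so the scrambled zipfian generator spans the insertcount existing keys
// plus 1000 predicted ones.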
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {<FILL_FUNCTION_BODY>}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
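// Hedged example (assumed inputs): with a constant field length of 24,
// buildDeterministicValue("user1", "field0") starts from "user1:field0" and
// appends ":" + the hashCode of the builder so far until it reaches 24 chars,
// then truncates to exactly 24, so re-running it reproduces the same value.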
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
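// Hedged sketch of the retry backoff above (assumed interval): with
// insertionRetryInterval = 3 seconds, each retry sleeps a uniformly random
// duration in [2400, 3600) ms, i.e. 1000 * 3 * (0.8 + 0.4 * Math.random()).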
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
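// Hedged note on the rejection loop above: candidate keys beyond the last
// acknowledged transactional insert are redrawn, so the chooser never reads
// a key that has not yet been fully written.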
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | buildDeterministicValue | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if you want to check the correctness of reads. Must also
* be set to true during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-writes.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "zipfian", "latest", "sequential", "hotspot", and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage of data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator computes a random "next key" in part by
// taking the modulus over the number of keys.
// If the number of keys changes, this would shift the modulus, and we don't want that to
// change which keys are popular. So we actually construct the scrambled zipfian generator
// with a keyspace that is larger than what exists at the beginning of the test: we predict
// the number of inserts, and tell the scrambled zipfian generator the number of existing keys
// plus the number of predicted keys as the total keyspace. Then, if the generator picks a key
// that hasn't been inserted yet, we just ignore it and pick another key. This way, the size of
// the keyspace doesn't change from the perspective of the scrambled zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {<FILL_FUNCTION_BODY>}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
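// Hedged note: with dataintegrity enabled, every read funnels its returned
// cells through verifyRow(), so a mismatch between a stored value and
// buildDeterministicValue() surfaces as Status.UNEXPECTED_STATE under "VERIFY".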
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
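// Hedged sketch (assumed proportions): building a chooser that issues ~95%
// reads and ~5% updates, mirroring the defaults parsed above.
private static DiscreteGenerator demoOperationChooser() {
Properties p = new Properties();
p.setProperty(READ_PROPORTION_PROPERTY, "0.95");
p.setProperty(UPDATE_PROPORTION_PROPERTY, "0.05");
return createOperationGenerator(p); // nextString() returns "READ" or "UPDATE"
}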
} |
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | doInsert | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
   * Set to true if you want to check the correctness of reads. The property must also
   * have been set to true during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
   * The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
   * The name of the property for the distribution of requests across the keyspace. Options are
   * "uniform", "sequential", "zipfian", "latest", "hotspot", and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
   * The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
   * The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
   * Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
   * Default value of the percentage of operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
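  // A minimal sketch (hypothetical helper, not part of the workload) of how
  // buildKeyName maps record numbers to row keys. With insertorder=hashed the
  // record number is scrambled through Utils.hash first, spreading keys across
  // the keyspace; zeropadding left-pads the numeric part so string sort order
  // matches numeric order. The literal outputs below are illustrative assumptions.
  public static void keyNameSketch() {
    // ordered inserts, padding width 12 -> "user000000000042"
    System.out.println(buildKeyName(42, 12, true));
    // hashed inserts, no extra padding -> "user" + Utils.hash(42)
    System.out.println(buildKeyName(42, 1, false));
  }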
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
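  // A hedged usage sketch (hypothetical helper): configuring a uniform
  // field-length distribution between 1 and 100 bytes via the same property
  // keys this factory reads. Note that fieldlength acts as the maximum when
  // the distribution is "uniform" or "zipfian".
  static NumberGenerator uniformFieldLengthSketch() throws WorkloadException {
    Properties p = new Properties();
    p.setProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY, "uniform");
    p.setProperty(MIN_FIELD_LENGTH_PROPERTY, "1");
    p.setProperty(FIELD_LENGTH_PROPERTY, "100");
    return getFieldLengthGenerator(p);
  }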
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
    long insertcount =
        Integer.parseInt(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
      // The zipfian generator produces a random "next key" in part by taking the modulus over the
      // number of keys. If the number of keys changes, this shifts the modulus, and we don't want
      // that to change which keys are popular. So we construct the scrambled zipfian generator with
      // a keyspace that is larger than what exists at the beginning of the test: we predict the
      // number of inserts and tell the generator the number of existing keys plus the number of
      // predicted keys as the total keyspace. Then, if the generator picks a key that hasn't been
      // inserted yet, we simply ignore it and pick another key. This way the size of the keyspace
      // never changes from the perspective of the scrambled zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
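  // Illustrative sketch (hypothetical helper): because buildDeterministicValue
  // derives a cell purely from (key, fieldkey) and the constant field length,
  // a reader can recompute the expected cell and compare it to what the store
  // returned; verifyRow(...) below does exactly this per entry. Assumes
  // dataintegrity=true, which forces a constant field length generator.
  private boolean cellMatchesTemplateSketch(String key, String fieldkey, String actual) {
    return actual.equals(buildDeterministicValue(key, fieldkey));
  }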
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {<FILL_FUNCTION_BODY>}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
    if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
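  // Hedged sketch (hypothetical helper) of the client-side loop that drives
  // doTransaction: each worker thread repeatedly asks the workload for one
  // operation until the budget is spent or the chooser yields no operation.
  // Throttling, warm-up, and error handling are elided.
  void clientLoopSketch(DB db, int opcount) {
    for (int i = 0; i < opcount && doTransaction(db, null); i++) {
      // each iteration performs one READ/UPDATE/INSERT/SCAN/READMODIFYWRITE
    }
  }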
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
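  // A sketch of the rejection sampling above (hypothetical bound parameter):
  // keychooser may draw a key that was handed to an inserter but not yet
  // acknowledged as durable, so nextKeynum() re-draws until the key is at or
  // below the acknowledged watermark and therefore safe to read.
  long boundedDrawSketch(long acknowledgedWatermark) {
    long keynum;
    do {
      keynum = keychooser.nextValue().intValue();
    } while (keynum > acknowledgedWatermark); // only touch keys known to exist
    return keynum;
  }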
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
   * Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
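  // Hedged usage sketch (hypothetical helper): a 50/50 read/update mix.
  // DiscreteGenerator picks values in proportion to the weights it was given,
  // so the proportions need not sum exactly to 1.0.
  static DiscreteGenerator fiftyFiftySketch() {
    Properties p = new Properties();
    p.setProperty(READ_PROPORTION_PROPERTY, "0.5");
    p.setProperty(UPDATE_PROPORTION_PROPERTY, "0.5");
    return createOperationGenerator(p);
  }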
} |
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
            // Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | doTransaction | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
   * (favoring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
   * Set to true if you want to check the correctness of reads. The property must also
   * have been set to true during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
   * The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
   * The name of the property for the distribution of requests across the keyspace. Options are
   * "uniform", "sequential", "zipfian", "latest", "hotspot", and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
   * The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
   * The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
   * Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
   * Default value of the percentage of operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
    long insertcount =
        Integer.parseInt(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
      // The zipfian generator produces a random "next key" in part by taking the modulus over the
      // number of keys. If the number of keys changes, this shifts the modulus, and we don't want
      // that to change which keys are popular. So we construct the scrambled zipfian generator with
      // a keyspace that is larger than what exists at the beginning of the test: we predict the
      // number of inserts and tell the generator the number of existing keys plus the number of
      // predicted keys as the total keyspace. Then, if the generator picks a key that hasn't been
      // inserted yet, we simply ignore it and pick another key. This way the size of the keyspace
      // never changes from the perspective of the scrambled zipfian generator. (A numeric sizing
      // sketch follows init() below.)
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
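  // A numeric sizing sketch for the zipfian branch of init() (hypothetical
  // figures, not defaults): with opcount = 10000 and insertproportion = 0.05,
  // expectednewkeys = 10000 * 0.05 * 2 = 1000, so insertcount = 100000 yields
  // a scrambled zipfian keyspace of 101000 keys.
  static long zipfianKeyspaceSketch(long insertcount, int opcount, double insertproportion) {
    int expectednewkeys = (int) (opcount * insertproportion * 2.0); // 2 is the fudge factor
    return insertcount + expectednewkeys;
  }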
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
          // Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
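  // Sketch of the jittered backoff used by doInsert (hypothetical helper):
  // each retry sleeps a uniformly random duration in [0.8, 1.2) times
  // insertionRetryInterval seconds, e.g. an interval of 3 seconds sleeps
  // between 2400 and 3600 milliseconds.
  int retrySleepMillisSketch() {
    return (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
  }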
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {<FILL_FUNCTION_BODY>}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
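  // Measurement sketch (hypothetical helper): latencies are recorded in
  // microseconds, hence the division of nanosecond timestamps by 1000 above.
  // The "intended" variant times from the scheduler's intended start, charging
  // coordinated-omission delay to the operation rather than hiding it.
  static int nanosToMicrosSketch(long startNanos, long endNanos) {
    return (int) ((endNanos - startNanos) / 1000);
  }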
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
   * Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
String operation = operationchooser.nextString();
    if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | verifyRow | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
   * (favoring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
 * Set to true if you want to check the correctness of reads. Must also
 * be set to true during the loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
 * The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
 * The name of the property for the distribution of requests across the keyspace. Options are
 * "uniform", "zipfian", "latest", "sequential", "hotspot", and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
 * The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
 * (favoring short scans).
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
 * The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
 * The name of the property for the order to insert records. Options are "ordered" or "hashed".
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
 * Percentage of data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
 * Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
 * Default value of the percentage of operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
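  // Worked example (values assumed): with hotspotdatafraction=0.2 and
  // hotspotopnfraction=0.8, roughly 80% of operations target the hottest 20% of the
  // keyspace, while the remaining 20% of operations spread uniformly over the other
  // 80% of keys.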
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
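  // Worked example (values assumed): with zeropadding=12 and ordered inserts,
  // buildKeyName(42, 12, true) returns "user000000000042". With hashed inserts
  // (orderedinserts == false) the key number is first scrambled by Utils.hash,
  // so consecutive key numbers map to non-consecutive keys.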
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
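  // Minimal usage sketch (assumed harness code, not part of the upstream file):
  //   Properties p = new Properties();
  //   p.setProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY, "uniform");
  //   p.setProperty(MIN_FIELD_LENGTH_PROPERTY, "1");
  //   p.setProperty(FIELD_LENGTH_PROPERTY, "100");
  //   NumberGenerator lengths = getFieldLengthGenerator(p); // uniform in [1, 100]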
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
    long insertcount =
        Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
    // Confirm that fieldlengthgenerator returns a constant if a data
    // integrity check is requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
      // The scrambled zipfian generator picks a random "next key" in part by taking the modulus
      // over the number of keys.
      // If the number of keys changed mid-run, that would shift the modulus, and we don't want
      // that to change which keys are popular. So we construct the scrambled zipfian generator
      // with a keyspace that is larger than the one that exists at the beginning of the test:
      // we predict the number of inserts and tell the generator that the total keyspace is the
      // number of existing keys plus the number of predicted new keys. If the generator then
      // picks a key that hasn't been inserted yet, nextKeynum() simply rejects it and picks
      // another. This way the keyspace size never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
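  // Schematic example (hash values elided): for key "user123", field "field2" and a
  // constant field length of 20, the value starts as "user123:field2", is extended
  // with ":<hashCode of the string so far>" segments until it reaches 20 characters,
  // and is then cut to exactly 20.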
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
      // if even a single insertion fails. Users can optionally configure
      // an insertion retry limit (default is 0) to enable retries.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
          // Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
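  // Worked example (values assumed): with core_workload_insertion_retry_limit=3 and
  // core_workload_insertion_retry_interval=3, each failed insert is retried up to
  // three times, sleeping a uniformly random 2.4s to 3.6s (that is,
  // [0.8, 1.2) * 3 seconds) between attempts.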
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
 * Verifies the returned row against the deterministic value template and
 * reports the outcome under the label "VERIFY".
 * Status.OK means the expected data was returned.
 * Status.UNEXPECTED_STATE means incorrect data was returned.
 * Status.ERROR means null or empty data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {<FILL_FUNCTION_BODY>}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
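  // Note on the rejection loops above: key numbers beyond
  // transactioninsertkeysequence.lastValue() have not yet been acknowledged as
  // inserted, so candidates past that watermark (or negative ones, in the
  // exponential case) are redrawn rather than read before they exist.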
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
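  // Worked example (values assumed): with minscanlength=1, maxscanlength=100 and
  // scanlengthdistribution=uniform, each scan starts at a key drawn from the request
  // distribution and asks the DB for a uniformly random 1 to 100 consecutive records.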
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
 * Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
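  // Usage sketch (proportions assumed): readproportion=0.5, updateproportion=0.3 and
  // scanproportion=0.2 yield a DiscreteGenerator whose nextString() returns "READ"
  // about 50%, "UPDATE" about 30% and "SCAN" about 20% of the time; doTransaction()
  // dispatches on that string.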
} |
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
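      // Compare each returned cell against the deterministic value template; a single
      // mismatch marks the whole row as UNEXPECTED_STATE.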
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | nextKeynum | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
 * data against the deterministic value template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
 * Set to true if you want to check the correctness of reads. Must also
 * be set to true during the loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
 * The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
 * The name of the property for the distribution of requests across the keyspace. Options are
 * "uniform", "zipfian", "latest", "sequential", "hotspot", and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
 * The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
 * (favoring short scans).
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
 * The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
 * The name of the property for the order to insert records. Options are "ordered" or "hashed".
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
 * Percentage of data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
 * Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
 * Default value of the percentage of operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
    long insertcount =
        Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
    // Confirm that fieldlengthgenerator returns a constant if a data
    // integrity check is requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
      // The scrambled zipfian generator picks a random "next key" in part by taking the modulus
      // over the number of keys.
      // If the number of keys changed mid-run, that would shift the modulus, and we don't want
      // that to change which keys are popular. So we construct the scrambled zipfian generator
      // with a keyspace that is larger than the one that exists at the beginning of the test:
      // we predict the number of inserts and tell the generator that the total keyspace is the
      // number of existing keys plus the number of predicted new keys. If the generator then
      // picks a key that hasn't been inserted yet, nextKeynum() simply rejects it and picks
      // another. This way the keyspace size never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
      // if even a single insertion fails. Users can optionally configure
      // an insertion retry limit (default is 0) to enable retries.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
          // Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
    if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
 * Verifies the returned row against the deterministic value template and
 * reports the outcome under the label "VERIFY".
 * Status.OK means the expected data was returned.
 * Status.UNEXPECTED_STATE means incorrect data was returned.
 * Status.ERROR means null or empty data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {<FILL_FUNCTION_BODY>}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
 * Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
long keynum;
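    // Redraw until the candidate key number is one that has already been acknowledged
    // as inserted (tracked by transactioninsertkeysequence). The exponential generator
    // instead offsets backwards from the newest key, so negative candidates are redrawn.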
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | doTransactionRead | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
 * data against the deterministic value template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
 * Set to true if you want to check the correctness of reads. Must also
 * be set to true during the loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
 * The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
 * The name of the property for the distribution of requests across the keyspace. Options are
 * "uniform", "zipfian", "latest", "sequential", "hotspot", and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value, which matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
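// Illustrative example (editorial addition, not part of the original source):
//   CoreWorkload.buildKeyName(42, 5, true);   // -> "user00042" (ordered inserts, padded to 5 digits)
//   CoreWorkload.buildKeyName(42, 1, false);  // -> "user" + Utils.hash(42), a scrambled key number
// Hashing decorrelates insertion order from key order, so "hashed" inserts spread load
// across the keyspace while "ordered" inserts keep keys lexically sortable.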
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
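// Hedged usage sketch (editorial addition; property names taken from the constants above):
//   Properties p = new Properties();
//   p.setProperty("fieldlengthdistribution", "uniform");
//   p.setProperty("minfieldlength", "10");
//   p.setProperty("fieldlength", "200");
//   NumberGenerator g = getFieldLengthGenerator(p); // draws lengths uniformly from [10, 200]
// For "uniform" and "zipfian" the fieldlength property acts as the upper bound; for
// "constant" it is the exact length of every field.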
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator produces a random "next key" in part by taking the
// modulus over the number of keys. If the number of keys changed, the modulus would
// shift, changing which keys are popular. To avoid that, we construct the generator
// with a keyspace larger than the one that exists at the start of the test: we predict
// the number of inserts and tell the generator to treat the existing keys plus the
// predicted new keys as the total keyspace. If the generator then picks a key that
// hasn't been inserted yet, we simply ignore it and pick another, so the keyspace size
// never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
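// Worked example (editorial addition): for key "user42", field "field3" and a constant
// field length of 30, the builder starts from "user42:field3" and repeatedly appends
// ':' followed by the hash code of the string built so far until it reaches 30
// characters, then truncates to exactly 30. The same (key, field, length) triple always
// produces the same string, which is the property verifyRow depends on.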
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
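// Illustrative example (editorial addition): with core_workload_insertion_retry_limit=3
// and core_workload_insertion_retry_interval=3, a failing insert is retried up to three
// times, sleeping a randomized 2.4 to 3.6 seconds between attempts, before the load
// phase gives up on that key and doInsert returns false.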
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
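// Editorial note: per the javadoc above, the Status.OK, Status.UNEXPECTED_STATE and
// Status.ERROR outcomes reported here correspond to buckets 0, 1 and 2 of the VERIFY
// histogram, so a clean run shows all verification results in bucket 0.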
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
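// Editorial note: this is rejection sampling. The key chooser may propose keys that do
// not exist yet (the scrambled zipfian generator, for instance, is deliberately built
// over a keyspace larger than the loaded data), so any proposal beyond the last
// acknowledged insert is discarded and a fresh one is drawn until a readable key comes up.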
public void doTransactionRead(DB db) {<FILL_FUNCTION_BODY>}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
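// Hedged usage sketch (editorial addition): with an empty Properties object the defaults
// apply, yielding the classic 95/5 mix:
//   DiscreteGenerator ops = createOperationGenerator(new Properties());
//   String next = ops.nextString(); // "READ" with probability 0.95, "UPDATE" with 0.05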
} |
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
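// Editorial note: fields == null asks the binding to fetch every column (often via a
// wildcard select), while the explicit set either restricts the read to one random
// field or, when verification or readallfieldsbyname requires it, names every field.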
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | doTransactionReadModifyWrite | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the deterministically generated values to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if you want to check the correctness of reads. Must also
* be set to true during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "exponential", "sequential", "zipfian", "latest" and "hotspot".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value, which matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator produces a random "next key" in part by taking the
// modulus over the number of keys. If the number of keys changed, the modulus would
// shift, changing which keys are popular. To avoid that, we construct the generator
// with a keyspace larger than the one that exists at the start of the test: we predict
// the number of inserts and tell the generator to treat the existing keys plus the
// predicted new keys as the total keyspace. If the generator then picks a key that
// hasn't been inserted yet, we simply ignore it and pick another, so the keyspace size
// never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {<FILL_FUNCTION_BODY>}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
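// Illustrative example (editorial addition): with minscanlength=1, maxscanlength=100 and
// scanlengthdistribution=zipfian, most scans touch only a few consecutive records
// starting at the chosen key, while an occasional scan approaches the 100-record cap.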
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
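// Editorial note: acknowledge() in the finally block marks this key as durable (or
// abandoned), advancing the watermark returned by transactioninsertkeysequence.lastValue()
// that nextKeynum() consults, so reads never target keys whose inserts are still in flight.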
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
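// Editorial note: two latencies are recorded deliberately. The first measures from the
// operation's actual start; the "intended" variant measures from the scheduled start
// time supplied by the measurement layer, keeping queueing delay visible when the
// client falls behind its target throughput (YCSB's coordinated-omission correction).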
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | doTransactionScan | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the deterministically generated values to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if you want to check the correctness of reads. Must also
* be set to true during the loading phase for verification to work.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "exponential", "sequential", "zipfian", "latest" and "hotspot".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value, which matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
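// Illustrative example (added, not part of the original source): with ordered inserts,
// buildKeyName(42, 5, true) pads "42" to five digits and returns "user00042"; with
// hashed inserts (orderedinserts == false) the key number is first scrambled through
// Utils.hash, so consecutive key numbers map to scattered key names.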
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
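// Example usage (a minimal sketch, added for illustration): selecting a uniform field
// length between minfieldlength and fieldlength via properties.
//
//   Properties p = new Properties();
//   p.setProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY, "uniform");
//   p.setProperty(MIN_FIELD_LENGTH_PROPERTY, "1");
//   p.setProperty(FIELD_LENGTH_PROPERTY, "100");
//   NumberGenerator lengths = getFieldLengthGenerator(p); // throws WorkloadException on bad input
//   long len = lengths.nextValue().longValue();           // uniformly distributed in [1, 100]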
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Integer.parseInt(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator produces a random "next key" in part by taking the modulus
// over the number of keys.
// If the number of keys changes, this would shift the modulus, and we don't want that to
// change which keys are popular. So we'll actually construct the scrambled zipfian generator
// with a keyspace that is larger than exists at the beginning of the test. That is, we'll predict
// the number of inserts, and tell the scrambled zipfian generator the number of existing keys
// plus the number of predicted keys as the total keyspace. Then, if the generator picks a key
// that hasn't been inserted yet, we will just ignore it and pick another key. This way, the size of
// the keyspace doesn't change from the perspective of the scrambled zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
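// Worked example (illustrative): with operationcount = 10000 and insertproportion = 0.05,
// expectednewkeys = 10000 * 0.05 * 2.0 = 1000, so the generator's keyspace is sized to
// absorb roughly twice the predicted number of transaction-phase inserts.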
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
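// Illustrative example (added): for key "user42", field "field3" and a constant field
// length of 20, the builder starts from "user42:field3", appends ':' plus the hash code
// of the string built so far until the length reaches 20, then truncates to exactly 20
// characters. The same key/field pair therefore always produces the same value.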
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random number between [0.8, 1.2)*insertionRetryInterval.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
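// e.g. with the default interval of 3 seconds, this sleeps between 2400 ms (inclusive) and 3600 ms (exclusive).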
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {<FILL_FUNCTION_BODY>}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
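// Example usage (a minimal sketch, added for illustration): a 50/50 read/update mix.
//
//   Properties p = new Properties();
//   p.setProperty(READ_PROPORTION_PROPERTY, "0.5");
//   p.setProperty(UPDATE_PROPORTION_PROPERTY, "0.5");
//   DiscreteGenerator ops = createOperationGenerator(p);
//   String op = ops.nextString(); // "READ" or "UPDATE", weighted by the proportions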
} |
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | doTransactionUpdate | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "zipfian", "latest", "sequential", "hotspot" and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Integer.parseInt(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// The scrambled zipfian generator produces a random "next key" in part by taking the modulus
// over the number of keys.
// If the number of keys changes, this would shift the modulus, and we don't want that to
// change which keys are popular. So we'll actually construct the scrambled zipfian generator
// with a keyspace that is larger than exists at the beginning of the test. That is, we'll predict
// the number of inserts, and tell the scrambled zipfian generator the number of existing keys
// plus the number of predicted keys as the total keyspace. Then, if the generator picks a key
// that hasn't been inserted yet, we will just ignore it and pick another key. This way, the size of
// the keyspace doesn't change from the perspective of the scrambled zipfian generator.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) ((opcount) * insertproportion * 2.0); // 2 is fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
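// Example configuration (a minimal sketch, added for illustration): a hotspot workload
// in which 20% of the records receive 80% of the requests.
//
//   Properties p = new Properties();
//   p.setProperty(REQUEST_DISTRIBUTION_PROPERTY, "hotspot");
//   p.setProperty(HOTSPOT_DATA_FRACTION, "0.2");
//   p.setProperty(HOTSPOT_OPN_FRACTION, "0.8");
//   init(p); // keychooser becomes a HotspotIntegerGenerator over [insertstart, insertstart + insertcount - 1]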
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
int keynum = keysequence.nextValue().intValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random number between [0.8, 1.2)*insertionRetryInterval.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
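// The elapsed nanoseconds are divided by 1000 below, so VERIFY latencies are recorded in microseconds.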
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
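// Illustrative example (added): if the highest acknowledged insert is key 999 and the
// key chooser draws 1042, the draw is rejected and repeated until a value <= 999 comes
// up, so transactions never target records that have not been fully inserted yet.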
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
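// Worked example (illustrative): if the intended start ist is 0 ns, the actual start st
// is 5,000,000 ns and the end en is 7,000,000 ns, the measured READ-MODIFY-WRITE latency
// is (7,000,000 - 5,000,000) / 1000 = 2000 us while the intended latency is 7000 us;
// the intended figure includes queueing delay and so guards against coordinated omission.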
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {<FILL_FUNCTION_BODY>}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
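// Note (added): acknowledging in a finally block keeps the AcknowledgedCounterGenerator's
// window of contiguous completed keys advancing even when the insert fails; otherwise the
// "latest" request distribution could stall waiting on the missing acknowledgement.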
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
} |
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | doTransactionInsert | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the formation template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "zipfian", "latest", "sequential", "hotspot" and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value. Matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
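// Illustrative note (added): with the default zeropadding of 1 no padding occurs and
// "user10" sorts before "user9" lexicographically; a zeropadding of e.g. 12 pads key 42
// to "user000000000042" so string order matches numeric order for keys up to 12 digits.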
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
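// Hedged configuration sketch (hypothetical helper): the properties that
// getFieldLengthGenerator consumes. A "zipfian" distribution here favours
// short fields drawn between minfieldlength and fieldlength.
private static Properties exampleFieldLengthProps() {
  Properties p = new Properties();
  p.setProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY, "zipfian");
  p.setProperty(MIN_FIELD_LENGTH_PROPERTY, "1");
  p.setProperty(FIELD_LENGTH_PROPERTY, "100");
  return p;
}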
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// A scrambled zipfian generator picks the next key in part by taking a modulus over the
// number of keys. If the number of keys changed mid-run, the modulus would shift, which
// would change which keys are popular. We don't want that, so we construct the generator
// with a keyspace that is larger than the one that exists at the beginning of the test:
// we predict the number of transactional inserts and tell the generator that the total
// keyspace is the number of existing keys plus the number of predicted inserts. If the
// generator then picks a key that hasn't been inserted yet, nextKeynum() simply discards
// it and picks another, so the keyspace never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) (opcount * insertproportion * 2.0); // 2.0 is a fudge factor
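// Worked example (illustrative): with operationcount = 1,000,000 and
// insertproportion = 0.05, expectednewkeys = 1,000,000 * 0.05 * 2.0 = 100,000,
// so the zipfian keyspace is sized insertcount + 100,000 keys ahead of time.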
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
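// Hedged configuration sketch (hypothetical helper): the minimal properties
// init() reads above to build a hotspot request distribution.
private static Properties exampleHotspotProps() {
  Properties p = new Properties();
  p.setProperty(REQUEST_DISTRIBUTION_PROPERTY, "hotspot");
  p.setProperty(HOTSPOT_DATA_FRACTION, "0.2"); // 20% of the keys form the hot set
  p.setProperty(HOTSPOT_OPN_FRACTION, "0.8");  // 80% of operations target the hot set
  return p;
}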
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
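// Illustrative note: for key "user42", field "field3" and a constant field
// length of 20, buildDeterministicValue yields "user42:field3:" followed by
// chained hashCode digits, truncated to exactly 20 characters. Because the
// value depends only on the key and field name, verifyRow can recompute it.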
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
long keynum = keysequence.nextValue().longValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
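// Illustrative note: with the default insertionRetryInterval of 3 seconds,
// sleepTime above falls in [2400, 3600) ms, i.e. a +/-20% jitter window that
// keeps retrying threads from waking up in lockstep.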
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {<FILL_FUNCTION_BODY>}
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
}
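// Hedged usage sketch (hypothetical demo method, not part of the original
// class): builds a 50/50 read/update mix and samples it, showing how the
// configured proportions translate into operation strings.
private static void demoOperationGenerator() {
  Properties p = new Properties();
  p.setProperty(READ_PROPORTION_PROPERTY, "0.5");
  p.setProperty(UPDATE_PROPORTION_PROPERTY, "0.5");
  DiscreteGenerator chooser = createOperationGenerator(p);
  for (int i = 0; i < 5; i++) {
    System.out.println(chooser.nextString()); // prints "READ" or "UPDATE"
  }
}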
} |
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/CoreWorkload.java | CoreWorkload | createOperationGenerator | class CoreWorkload extends Workload {
/**
* The name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY = "table";
/**
* The default name of the database table to run queries against.
*/
public static final String TABLENAME_PROPERTY_DEFAULT = "usertable";
protected String table;
/**
* The name of the property for the number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY = "fieldcount";
/**
* Default number of fields in a record.
*/
public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10";
private List<String> fieldnames;
/**
* The name of the property for the field length distribution. Options are "uniform", "zipfian"
* (favouring short records), "constant", and "histogram".
* <p>
* If "uniform", "zipfian" or "constant", the maximum field length will be that specified by the
* fieldlength property. If "histogram", then the histogram will be read from the filename
* specified in the "fieldlengthhistogram" property.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY = "fieldlengthdistribution";
/**
* The default field length distribution.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "constant";
/**
* The name of the property for the length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY = "fieldlength";
/**
* The default maximum length of a field in bytes.
*/
public static final String FIELD_LENGTH_PROPERTY_DEFAULT = "100";
/**
* The name of the property for the minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY = "minfieldlength";
/**
* The default minimum length of a field in bytes.
*/
public static final String MIN_FIELD_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of a property that specifies the filename containing the field length histogram (only
* used if fieldlengthdistribution is "histogram").
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY = "fieldlengthhistogram";
/**
* The default filename containing a field length histogram.
*/
public static final String FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT = "hist.txt";
/**
* Generator object that produces field lengths. The value of this depends on the properties that
* start with "FIELD_LENGTH_".
*/
protected NumberGenerator fieldlengthgenerator;
/**
* The name of the property for deciding whether to read one field (false) or all fields (true) of
* a record.
*/
public static final String READ_ALL_FIELDS_PROPERTY = "readallfields";
/**
* The default value for the readallfields property.
*/
public static final String READ_ALL_FIELDS_PROPERTY_DEFAULT = "true";
protected boolean readallfields;
/**
* The name of the property for determining how to read all the fields when readallfields is true.
* If set to true, all the field names will be passed into the underlying client. If set to false,
* null will be passed into the underlying client. When passed a null, some clients may retrieve
* the entire row with a wildcard, which may be slower than naming all the fields.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY = "readallfieldsbyname";
/**
* The default value for the readallfieldsbyname property.
*/
public static final String READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT = "false";
protected boolean readallfieldsbyname;
/**
* The name of the property for deciding whether to write one field (false) or all fields (true)
* of a record.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY = "writeallfields";
/**
* The default value for the writeallfields property.
*/
public static final String WRITE_ALL_FIELDS_PROPERTY_DEFAULT = "false";
protected boolean writeallfields;
/**
* The name of the property for deciding whether to check all returned
* data against the deterministic value template to ensure data integrity.
*/
public static final String DATA_INTEGRITY_PROPERTY = "dataintegrity";
/**
* The default value for the dataintegrity property.
*/
public static final String DATA_INTEGRITY_PROPERTY_DEFAULT = "false";
/**
* Set to true if you want to check the correctness of reads. Must also
* be set to true during the loading phase to function.
*/
private boolean dataintegrity;
/**
* The name of the property for the proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY = "readproportion";
/**
* The default proportion of transactions that are reads.
*/
public static final String READ_PROPORTION_PROPERTY_DEFAULT = "0.95";
/**
* The name of the property for the proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY = "updateproportion";
/**
* The default proportion of transactions that are updates.
*/
public static final String UPDATE_PROPORTION_PROPERTY_DEFAULT = "0.05";
/**
* The name of the property for the proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY = "insertproportion";
/**
* The default proportion of transactions that are inserts.
*/
public static final String INSERT_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY = "scanproportion";
/**
* The default proportion of transactions that are scans.
*/
public static final String SCAN_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY = "readmodifywriteproportion";
/**
* The default proportion of transactions that are read-modify-write.
*/
public static final String READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT = "0.0";
/**
* The name of the property for the distribution of requests across the keyspace. Options are
* "uniform", "sequential", "zipfian", "latest", "hotspot" and "exponential".
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY = "requestdistribution";
/**
* The default distribution of requests across the keyspace.
*/
public static final String REQUEST_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for adding zero padding to record numbers in order to match
* string sort order. Controls the number of 0s to left pad with.
*/
public static final String ZERO_PADDING_PROPERTY = "zeropadding";
/**
* The default zero padding value, which matches integer sort order.
*/
public static final String ZERO_PADDING_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the min scan length (number of records).
*/
public static final String MIN_SCAN_LENGTH_PROPERTY = "minscanlength";
/**
* The default min scan length.
*/
public static final String MIN_SCAN_LENGTH_PROPERTY_DEFAULT = "1";
/**
* The name of the property for the max scan length (number of records).
*/
public static final String MAX_SCAN_LENGTH_PROPERTY = "maxscanlength";
/**
* The default max scan length.
*/
public static final String MAX_SCAN_LENGTH_PROPERTY_DEFAULT = "1000";
/**
* The name of the property for the scan length distribution. Options are "uniform" and "zipfian"
* (favoring short scans)
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY = "scanlengthdistribution";
/**
* The default scan length distribution.
*/
public static final String SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT = "uniform";
/**
* The name of the property for the order to insert records. Options are "ordered" or "hashed"
*/
public static final String INSERT_ORDER_PROPERTY = "insertorder";
/**
* Default insert order.
*/
public static final String INSERT_ORDER_PROPERTY_DEFAULT = "hashed";
/**
* Percentage of data items that constitute the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION = "hotspotdatafraction";
/**
* Default value of the size of the hot set.
*/
public static final String HOTSPOT_DATA_FRACTION_DEFAULT = "0.2";
/**
* Percentage of operations that access the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION = "hotspotopnfraction";
/**
* Default value of the percentage operations accessing the hot set.
*/
public static final String HOTSPOT_OPN_FRACTION_DEFAULT = "0.8";
/**
* How many times to retry when insertion of a single item to a DB fails.
*/
public static final String INSERTION_RETRY_LIMIT = "core_workload_insertion_retry_limit";
public static final String INSERTION_RETRY_LIMIT_DEFAULT = "0";
/**
* On average, how long to wait between the retries, in seconds.
*/
public static final String INSERTION_RETRY_INTERVAL = "core_workload_insertion_retry_interval";
public static final String INSERTION_RETRY_INTERVAL_DEFAULT = "3";
/**
* Field name prefix.
*/
public static final String FIELD_NAME_PREFIX = "fieldnameprefix";
/**
* Default value of the field name prefix.
*/
public static final String FIELD_NAME_PREFIX_DEFAULT = "field";
protected NumberGenerator keysequence;
protected DiscreteGenerator operationchooser;
protected NumberGenerator keychooser;
protected NumberGenerator fieldchooser;
protected AcknowledgedCounterGenerator transactioninsertkeysequence;
protected NumberGenerator scanlength;
protected boolean orderedinserts;
protected long fieldcount;
protected long recordcount;
protected int zeropadding;
protected int insertionRetryLimit;
protected int insertionRetryInterval;
private Measurements measurements = Measurements.getMeasurements();
public static String buildKeyName(long keynum, int zeropadding, boolean orderedinserts) {
if (!orderedinserts) {
keynum = Utils.hash(keynum);
}
String value = Long.toString(keynum);
int fill = zeropadding - value.length();
String prekey = "user";
for (int i = 0; i < fill; i++) {
prekey += '0';
}
return prekey + value;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
NumberGenerator fieldlengthgenerator;
String fieldlengthdistribution = p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY, FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
int fieldlength =
Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
int minfieldlength =
Integer.parseInt(p.getProperty(MIN_FIELD_LENGTH_PROPERTY, MIN_FIELD_LENGTH_PROPERTY_DEFAULT));
String fieldlengthhistogram = p.getProperty(
FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY, FIELD_LENGTH_HISTOGRAM_FILE_PROPERTY_DEFAULT);
if (fieldlengthdistribution.compareTo("constant") == 0) {
fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength);
} else if (fieldlengthdistribution.compareTo("uniform") == 0) {
fieldlengthgenerator = new UniformLongGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("zipfian") == 0) {
fieldlengthgenerator = new ZipfianGenerator(minfieldlength, fieldlength);
} else if (fieldlengthdistribution.compareTo("histogram") == 0) {
try {
fieldlengthgenerator = new HistogramGenerator(fieldlengthhistogram);
} catch (IOException e) {
throw new WorkloadException(
"Couldn't read field length histogram file: " + fieldlengthhistogram, e);
}
} else {
throw new WorkloadException(
"Unknown field length distribution \"" + fieldlengthdistribution + "\"");
}
return fieldlengthgenerator;
}
/**
* Initialize the scenario.
* Called once, in the main client thread, before any operations are started.
*/
@Override
public void init(Properties p) throws WorkloadException {
table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
fieldcount =
Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT));
final String fieldnameprefix = p.getProperty(FIELD_NAME_PREFIX, FIELD_NAME_PREFIX_DEFAULT);
fieldnames = new ArrayList<>();
for (int i = 0; i < fieldcount; i++) {
fieldnames.add(fieldnameprefix + i);
}
fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p);
recordcount =
Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
String requestdistrib =
p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
int minscanlength =
Integer.parseInt(p.getProperty(MIN_SCAN_LENGTH_PROPERTY, MIN_SCAN_LENGTH_PROPERTY_DEFAULT));
int maxscanlength =
Integer.parseInt(p.getProperty(MAX_SCAN_LENGTH_PROPERTY, MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
long insertstart =
Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT));
long insertcount =
Long.parseLong(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart)));
// Confirm valid values for insertstart and insertcount in relation to recordcount
if (recordcount < (insertstart + insertcount)) {
System.err.println("Invalid combination of insertstart, insertcount and recordcount.");
System.err.println("recordcount must be bigger than insertstart + insertcount.");
System.exit(-1);
}
zeropadding =
Integer.parseInt(p.getProperty(ZERO_PADDING_PROPERTY, ZERO_PADDING_PROPERTY_DEFAULT));
readallfields = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_PROPERTY, READ_ALL_FIELDS_PROPERTY_DEFAULT));
readallfieldsbyname = Boolean.parseBoolean(
p.getProperty(READ_ALL_FIELDS_BY_NAME_PROPERTY, READ_ALL_FIELDS_BY_NAME_PROPERTY_DEFAULT));
writeallfields = Boolean.parseBoolean(
p.getProperty(WRITE_ALL_FIELDS_PROPERTY, WRITE_ALL_FIELDS_PROPERTY_DEFAULT));
dataintegrity = Boolean.parseBoolean(
p.getProperty(DATA_INTEGRITY_PROPERTY, DATA_INTEGRITY_PROPERTY_DEFAULT));
// Confirm that fieldlengthgenerator returns a constant if data
// integrity check requested.
if (dataintegrity && !(p.getProperty(
FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT)).equals("constant")) {
System.err.println("Must have constant field size to check data integrity.");
System.exit(-1);
}
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
if (p.getProperty(INSERT_ORDER_PROPERTY, INSERT_ORDER_PROPERTY_DEFAULT).compareTo("hashed") == 0) {
orderedinserts = false;
} else {
orderedinserts = true;
}
keysequence = new CounterGenerator(insertstart);
operationchooser = createOperationGenerator(p);
transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("exponential") == 0) {
double percentile = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(
ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordcount * frac);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
// A scrambled zipfian generator picks the next key in part by taking a modulus over the
// number of keys. If the number of keys changed mid-run, the modulus would shift, which
// would change which keys are popular. We don't want that, so we construct the generator
// with a keyspace that is larger than the one that exists at the beginning of the test:
// we predict the number of transactional inserts and tell the generator that the total
// keyspace is the number of existing keys plus the number of predicted inserts. If the
// generator then picks a key that hasn't been inserted yet, nextKeynum() simply discards
// it and picks another, so the keyspace never changes from the generator's perspective.
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
int opcount = Integer.parseInt(p.getProperty(Client.OPERATION_COUNT_PROPERTY));
int expectednewkeys = (int) (opcount * insertproportion * 2.0); // 2.0 is a fudge factor
keychooser = new ScrambledZipfianGenerator(insertstart, insertstart + insertcount + expectednewkeys);
} else if (requestdistrib.compareTo("latest") == 0) {
keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(insertstart, insertstart + insertcount - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
fieldchooser = new UniformLongGenerator(0, fieldcount - 1);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(minscanlength, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(minscanlength, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
insertionRetryLimit = Integer.parseInt(p.getProperty(
INSERTION_RETRY_LIMIT, INSERTION_RETRY_LIMIT_DEFAULT));
insertionRetryInterval = Integer.parseInt(p.getProperty(
INSERTION_RETRY_INTERVAL, INSERTION_RETRY_INTERVAL_DEFAULT));
}
/**
* Builds a value for a randomly chosen field.
*/
private HashMap<String, ByteIterator> buildSingleValue(String key) {
HashMap<String, ByteIterator> value = new HashMap<>();
String fieldkey = fieldnames.get(fieldchooser.nextValue().intValue());
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
value.put(fieldkey, data);
return value;
}
/**
* Builds values for all fields.
*/
private HashMap<String, ByteIterator> buildValues(String key) {
HashMap<String, ByteIterator> values = new HashMap<>();
for (String fieldkey : fieldnames) {
ByteIterator data;
if (dataintegrity) {
data = new StringByteIterator(buildDeterministicValue(key, fieldkey));
} else {
// fill with random data
data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
}
values.put(fieldkey, data);
}
return values;
}
/**
* Build a deterministic value given the key information.
*/
private String buildDeterministicValue(String key, String fieldkey) {
int size = fieldlengthgenerator.nextValue().intValue();
StringBuilder sb = new StringBuilder(size);
sb.append(key);
sb.append(':');
sb.append(fieldkey);
while (sb.length() < size) {
sb.append(':');
sb.append(sb.toString().hashCode());
}
sb.setLength(size);
return sb.toString();
}
/**
* Do one insert operation. Because it will be called concurrently from multiple client threads,
* this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
long keynum = keysequence.nextValue().longValue();
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
Status status;
int numOfRetries = 0;
do {
status = db.insert(table, dbkey, values);
if (null != status && status.isOk()) {
break;
}
// Retry if configured. Without retrying, the load process will fail
// even if one single insertion fails. User can optionally configure
// an insertion retry limit (default is 0) to enable retry.
if (++numOfRetries <= insertionRetryLimit) {
System.err.println("Retrying insertion, retry count: " + numOfRetries);
try {
// Sleep for a random duration in [0.8, 1.2) * insertionRetryInterval seconds.
int sleepTime = (int) (1000 * insertionRetryInterval * (0.8 + 0.4 * Math.random()));
Thread.sleep(sleepTime);
} catch (InterruptedException e) {
break;
}
} else {
System.err.println("Error inserting, not retrying any more. number of attempts: " + numOfRetries +
"Insertion Retry Limit: " + insertionRetryLimit);
break;
}
} while (true);
return null != status && status.isOk();
}
/**
* Do one transaction operation. Because it will be called concurrently from multiple client
* threads, this function must be thread safe. However, avoid synchronized, or the threads will block waiting
* for each other, and it will be difficult to reach the target throughput. Ideally, this function would
* have no side effects other than DB operations.
*/
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if(operation == null) {
return false;
}
switch (operation) {
case "READ":
doTransactionRead(db);
break;
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "SCAN":
doTransactionScan(db);
break;
default:
doTransactionReadModifyWrite(db);
}
return true;
}
/**
* Results are reported in the first three buckets of the histogram under
* the label "VERIFY".
* Bucket 0 means the expected data was returned.
* Bucket 1 means incorrect data was returned.
* Bucket 2 means null data was returned when some data was expected.
*/
protected void verifyRow(String key, HashMap<String, ByteIterator> cells) {
Status verifyStatus = Status.OK;
long startTime = System.nanoTime();
if (!cells.isEmpty()) {
for (Map.Entry<String, ByteIterator> entry : cells.entrySet()) {
if (!entry.getValue().toString().equals(buildDeterministicValue(key, entry.getKey()))) {
verifyStatus = Status.UNEXPECTED_STATE;
break;
}
}
} else {
// This assumes that null data is never valid
verifyStatus = Status.ERROR;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
}
long nextKeynum() {
long keynum;
if (keychooser instanceof ExponentialGenerator) {
do {
keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue();
} while (keynum < 0);
} else {
do {
keynum = keychooser.nextValue().intValue();
} while (keynum > transactioninsertkeysequence.lastValue());
}
return keynum;
}
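// Illustrative note: the loops above are rejection sampling. A drawn key
// greater than transactioninsertkeysequence.lastValue() may not have been
// inserted yet (see the zipfian keyspace comment in init), so it is discarded
// and redrawn instead of shrinking the generator's keyspace.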
public void doTransactionRead(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
} else if (dataintegrity || readallfieldsbyname) {
// pass the full field list if dataintegrity is on for verification
fields = new HashSet<String>(fieldnames);
}
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
db.read(table, keyname, fields, cells);
if (dataintegrity) {
verifyRow(keyname, cells);
}
}
public void doTransactionReadModifyWrite(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
// do the transaction
HashMap<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
long ist = measurements.getIntendedStartTimeNs();
long st = System.nanoTime();
db.read(table, keyname, fields, cells);
db.update(table, keyname, values);
long en = System.nanoTime();
if (dataintegrity) {
verifyRow(keyname, cells);
}
measurements.measure("READ-MODIFY-WRITE", (int) ((en - st) / 1000));
measurements.measureIntended("READ-MODIFY-WRITE", (int) ((en - ist) / 1000));
}
public void doTransactionScan(DB db) {
// choose a random key
long keynum = nextKeynum();
String startkeyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
// choose a random scan length
int len = scanlength.nextValue().intValue();
HashSet<String> fields = null;
if (!readallfields) {
// read a random field
String fieldname = fieldnames.get(fieldchooser.nextValue().intValue());
fields = new HashSet<String>();
fields.add(fieldname);
}
db.scan(table, startkeyname, len, fields, new Vector<HashMap<String, ByteIterator>>());
}
public void doTransactionUpdate(DB db) {
// choose a random key
long keynum = nextKeynum();
String keyname = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values;
if (writeallfields) {
// new data for all the fields
values = buildValues(keyname);
} else {
// update a random field
values = buildSingleValue(keyname);
}
db.update(table, keyname, values);
}
public void doTransactionInsert(DB db) {
// choose the next key
long keynum = transactioninsertkeysequence.nextValue();
try {
String dbkey = CoreWorkload.buildKeyName(keynum, zeropadding, orderedinserts);
HashMap<String, ByteIterator> values = buildValues(dbkey);
db.insert(table, dbkey, values);
} finally {
transactioninsertkeysequence.acknowledge(keynum);
}
}
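// Illustrative note: the try/finally acknowledge pattern above lets
// AcknowledgedCounterGenerator advance its sliding window even when the
// insert fails, so nextKeynum() never waits forever on a key that will
// never be acknowledged.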
/**
* Creates a weighted discrete generator of database operations for a workload to perform.
* Weights/proportions are read from the properties list and defaults are used
* when values are not configured.
* Current operations are "READ", "UPDATE", "INSERT", "SCAN" and "READMODIFYWRITE".
*
* @param p The properties list to pull weights from.
* @return A generator that can be used to determine the next operation to perform.
* @throws IllegalArgumentException if the properties object was null.
*/
protected static DiscreteGenerator createOperationGenerator(final Properties p) {<FILL_FUNCTION_BODY>}
} |
if (p == null) {
throw new IllegalArgumentException("Properties object cannot be null");
}
final double readproportion = Double.parseDouble(
p.getProperty(READ_PROPORTION_PROPERTY, READ_PROPORTION_PROPERTY_DEFAULT));
final double updateproportion = Double.parseDouble(
p.getProperty(UPDATE_PROPORTION_PROPERTY, UPDATE_PROPORTION_PROPERTY_DEFAULT));
final double insertproportion = Double.parseDouble(
p.getProperty(INSERT_PROPORTION_PROPERTY, INSERT_PROPORTION_PROPERTY_DEFAULT));
final double scanproportion = Double.parseDouble(
p.getProperty(SCAN_PROPORTION_PROPERTY, SCAN_PROPORTION_PROPERTY_DEFAULT));
final double readmodifywriteproportion = Double.parseDouble(p.getProperty(
READMODIFYWRITE_PROPORTION_PROPERTY, READMODIFYWRITE_PROPORTION_PROPERTY_DEFAULT));
final DiscreteGenerator operationchooser = new DiscreteGenerator();
if (readproportion > 0) {
operationchooser.addValue(readproportion, "READ");
}
if (updateproportion > 0) {
operationchooser.addValue(updateproportion, "UPDATE");
}
if (insertproportion > 0) {
operationchooser.addValue(insertproportion, "INSERT");
}
if (scanproportion > 0) {
operationchooser.addValue(scanproportion, "SCAN");
}
if (readmodifywriteproportion > 0) {
operationchooser.addValue(readmodifywriteproportion, "READMODIFYWRITE");
}
return operationchooser;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | init | class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
* In web services, even though the CRUD operations follow the same request
* distribution, they have different traces and distribution parameter
* values. Configuring the parameters of these operations separately therefore
* makes the benchmark more flexible and capable of generating more
* realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {<FILL_FUNCTION_BODY>}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
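// Hedged configuration sketch (hypothetical helper): enables delete traffic,
// which the parent CoreWorkload generator does not model, alongside reads.
private static Properties exampleDeleteMixProps() {
  Properties p = new Properties();
  p.setProperty(READ_PROPORTION_PROPERTY, "0.9");
  p.setProperty(DELETE_PROPORTION_PROPERTY, "0.1");
  return p;
}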
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfConstant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfConstant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
// Needs special handling for Zipfian distribution for variable Zipf Constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
// Use try-with-resources so the reader is closed even when readLine throws.
try (BufferedReader bufferReader = new BufferedReader(new FileReader(filePath))) {
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
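// Illustrative note: a trace file is assumed to hold one target URL per line,
// for example:
//   resource/1
//   resource/2
// getTrace maps line i of the file to key i, reading at most recordCount lines.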
/**
* Not required for Rest Clients as data population is service specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | createOperationGenerator | class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
* In web services, even though the CRUD operations follow the same request
* distribution, they have different traces and distribution parameter
* values. Configuring the parameters of these operations separately therefore
* makes the benchmark more flexible and capable of generating more
* realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {<FILL_FUNCTION_BODY>}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
// Needs special handling for the Zipfian distribution to allow a variable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
 * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
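
A minimal usage sketch for the operation chooser above, assuming the site.ycsb core classes are importable. The proportions below are illustrative, not defaults: CoreWorkload contributes the READ/UPDATE/INSERT weights, and deleteproportion adds the DELETE branch that only RestWorkload handles.

import java.util.Properties;
import site.ycsb.generator.DiscreteGenerator;
import site.ycsb.workloads.RestWorkload;

public final class OperationChooserDemo {
  public static void main(String[] args) {
    Properties p = new Properties();
    p.setProperty("readproportion", "0.6");   // CoreWorkload property names
    p.setProperty("updateproportion", "0.2");
    p.setProperty("insertproportion", "0.1");
    p.setProperty("deleteproportion", "0.1"); // recognized only by RestWorkload
    DiscreteGenerator chooser = RestWorkload.createOperationGenerator(p);
    // Each call returns "READ", "UPDATE", "INSERT", or "DELETE" with the configured weights.
    System.out.println(chooser.nextString());
  }
}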
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | getKeyChooser | class RestWorkload extends CoreWorkload {
/**
 * The name of the property for the proportion of transactions that are
 * deletes.
 */
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
 * The default proportion of transactions that are deletes.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
 * In web services, even though the CRUD operations follow the same request
 * distribution, they have different traces and distribution parameter
 * values. Hence, configuring the parameters of these operations separately
 * makes the benchmark more flexible and capable of generating more
 * realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {<FILL_FUNCTION_BODY>}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
// Needs special handling for the Zipfian distribution to allow a variable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
 * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
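
To make the distribution choice concrete, a small sketch comparing the zipfian and uniform choosers over a hypothetical 100-entry trace. Both generator classes are the ones referenced in getKeyChooser; the printed counts vary from run to run, but the zipfian chooser draws index 0 far more often.

import site.ycsb.generator.NumberGenerator;
import site.ycsb.generator.UniformLongGenerator;
import site.ycsb.generator.ZipfianGenerator;

public final class KeyChooserDemo {
  public static void main(String[] args) {
    NumberGenerator zipf = new ZipfianGenerator(100, 0.99);    // skewed toward low indices
    NumberGenerator uniform = new UniformLongGenerator(0, 99); // every index equally likely
    int zipfZero = 0;
    int uniformZero = 0;
    for (int i = 0; i < 10000; i++) {
      if (zipf.nextValue().intValue() == 0) {
        zipfZero++;
      }
      if (uniform.nextValue().intValue() == 0) {
        uniformZero++;
      }
    }
    System.out.printf("index 0 drawn: zipfian=%d, uniform=%d of 10000%n", zipfZero, uniformZero);
  }
}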
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | getFieldLengthGenerator | class RestWorkload extends CoreWorkload {
/**
 * The name of the property for the proportion of transactions that are
 * deletes.
 */
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
 * The default proportion of transactions that are deletes.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
 * In web services, even though the CRUD operations follow the same request
 * distribution, they have different traces and distribution parameter
 * values. Hence, configuring the parameters of these operations separately
 * makes the benchmark more flexible and capable of generating more
 * realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {<FILL_FUNCTION_BODY>}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
 * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
// Needs special handling for the Zipfian distribution to allow a variable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
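
A sketch of requesting zipfian payload sizes through this factory. It assumes the demo class sits in the site.ycsb.workloads package, since getFieldLengthGenerator is protected; the property names are the real CoreWorkload/RestWorkload ones, while the values are illustrative.

package site.ycsb.workloads;

import java.util.Properties;
import site.ycsb.WorkloadException;
import site.ycsb.generator.NumberGenerator;

public final class FieldLengthDemo {
  public static void main(String[] args) throws WorkloadException {
    Properties p = new Properties();
    p.setProperty("fieldlengthdistribution", "zipfian"); // CoreWorkload property name
    p.setProperty("fieldlength", "1024");                // upper bound on payload size
    p.setProperty("insertsizezipfconstant", "0.99");     // skew of the size distribution
    NumberGenerator lengths = RestWorkload.getFieldLengthGenerator(p);
    // Mostly small values, bounded above by fieldlength.
    System.out.println(lengths.nextValue().longValue());
  }
}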
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | getTrace | class RestWorkload extends CoreWorkload {
/**
 * The name of the property for the proportion of transactions that are
 * deletes.
 */
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
 * The default proportion of transactions that are deletes.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
 * In web services, even though the CRUD operations follow the same request
 * distribution, they have different traces and distribution parameter
 * values. Hence, configuring the parameters of these operations separately
 * makes the benchmark more flexible and capable of generating more
 * realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
// Needs special handling for the Zipfian distribution to allow a variable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {<FILL_FUNCTION_BODY>}
/**
 * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
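
The trace format implied above is one URL per line; line i is stored under key i. Below is a hedged variant of the same loader using try-with-resources, so the reader is closed even when readLine throws mid-file (loadTrace and TraceLoader are illustrative names, not part of the class):

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public final class TraceLoader {
  static Map<Integer, String> loadTrace(String filePath, int recordCount) throws IOException {
    Map<Integer, String> urlMap = new HashMap<>();
    try (BufferedReader reader = new BufferedReader(new FileReader(filePath))) {
      String line;
      int count = 0;
      // Same semantics as getTrace: trim each line and stop at recordCount entries.
      while ((line = reader.readLine()) != null && count < recordCount) {
        urlMap.put(count++, line.trim());
      }
    }
    return urlMap;
  }
}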
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | doInsert | class RestWorkload extends CoreWorkload {
/**
 * The name of the property for the proportion of transactions that are
 * deletes.
 */
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
 * The default proportion of transactions that are deletes.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
 * In web services, even though the CRUD operations follow the same request
 * distribution, they have different traces and distribution parameter
 * values. Hence, configuring the parameters of these operations separately
 * makes the benchmark more flexible and capable of generating more
 * realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
// Needs special handling for delete operations not supported in CoreWorkload.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
// Needs special handling for the Zipfian distribution to allow a variable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
 * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {<FILL_FUNCTION_BODY>}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
return false;
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | doTransaction | class RestWorkload extends CoreWorkload {
/**
 * The name of the property for the proportion of transactions that are
 * deletes.
 */
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
 * The default proportion of transactions that are deletes.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
 * In web services, even though the CRUD operations follow the same request
 * distribution, they have different traces and distribution parameter
 * values. Hence, configuring the parameters of these operations separately
 * makes the benchmark more flexible and capable of generating more
 * realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
// Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
    // Delete operations need special handling here because CoreWorkload does not support them.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
    // Needs special handling for the Zipfian distribution to support a configurable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
   * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {<FILL_FUNCTION_BODY>}
/**
   * Returns the next URL to be called for the given operation type
   * (1 = read, 2 = insert, 3 = delete, 4 = update).
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
|
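// --- Illustrative usage sketch (not part of the YCSB sources). It shows how the
// DELETE proportion wired in above extends CoreWorkload's operation mix; the
// "readproportion"/"updateproportion" property names are assumed to follow the
// usual CoreWorkload conventions.
import java.util.Properties;
import site.ycsb.generator.DiscreteGenerator;
import site.ycsb.workloads.RestWorkload;

public final class OperationMixSketch {
  public static void main(String[] args) {
    Properties p = new Properties();
    p.setProperty("readproportion", "0.7");   // 70% reads
    p.setProperty("updateproportion", "0.2"); // 20% updates
    p.setProperty("deleteproportion", "0.1"); // 10% deletes (the RestWorkload extension)
    DiscreteGenerator chooser = RestWorkload.createOperationGenerator(p);
    for (int i = 0; i < 5; i++) {
      // Draws operation labels ("READ", "UPDATE", "DELETE", ...) per the proportions.
      System.out.println(chooser.nextString());
    }
  }
}
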
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | getNextURL | class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
   * In web services, even though the CRUD operations follow the same request
   * distribution, they have different traces and distribution parameter
   * values. Configuring the parameters of these operations separately
   * therefore makes the benchmark more flexible and capable of generating
   * more realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
  // Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
    // Delete operations need special handling here because CoreWorkload does not support them.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
    // Needs special handling for the Zipfian distribution to support a configurable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
   * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
   * Returns the next URL to be called for the given operation type
   * (1 = read, 2 = insert, 3 = delete, 4 = update).
*/
private String getNextURL(int opType) {<FILL_FUNCTION_BODY>}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
|
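// --- A self-contained sketch (hypothetical, not YCSB code) of an alternative
// to getNextURL's numeric opType codes: an enum keeps the dispatch explicit.
// The maps below stand in for RestWorkload's per-operation URL maps.
import java.util.HashMap;
import java.util.Map;

public final class NextUrlSketch {
  enum OpType { READ, INSERT, DELETE, UPDATE }

  static String nextUrl(Map<OpType, Map<Integer, String>> urlMaps, OpType op, int key) {
    return urlMaps.get(op).get(key);
  }

  public static void main(String[] args) {
    Map<OpType, Map<Integer, String>> urlMaps = new HashMap<>();
    for (OpType t : OpType.values()) {
      Map<Integer, String> m = new HashMap<>();
      m.put(0, "resource/" + t.name().toLowerCase() + "/0");
      urlMaps.put(t, m);
    }
    // In RestWorkload the integer key would come from the per-operation key chooser.
    System.out.println(nextUrl(urlMaps, OpType.DELETE, 0)); // resource/delete/0
  }
}
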
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | doTransactionRead | class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
   * In web services, even though the CRUD operations follow the same request
   * distribution, they have different traces and distribution parameter
   * values. Configuring the parameters of these operations separately
   * therefore makes the benchmark more flexible and capable of generating
   * more realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
  // Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
    // Delete operations need special handling here because CoreWorkload does not support them.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
    // Needs special handling for the Zipfian distribution to support a configurable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
   * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
   * Returns the next URL to be called for the given operation type
   * (1 = read, 2 = insert, 3 = delete, 4 = update).
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {<FILL_FUNCTION_BODY>}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
|
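// --- Hypothetical minimal DB binding (not the real rest binding) showing how
// doTransactionRead's call above is interpreted on the client side: the table
// argument arrives as null and the generated URL travels in the "key" slot.
// The site.ycsb.DB method signatures below are assumed from the core API.
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.Vector;
import site.ycsb.ByteIterator;
import site.ycsb.DB;
import site.ycsb.Status;

public class LoggingRestStub extends DB {
  @Override
  public Status read(String table, String url, Set<String> fields, Map<String, ByteIterator> result) {
    System.out.println("GET " + url); // a real binding would issue an HTTP GET here
    return Status.OK;
  }
  @Override
  public Status insert(String table, String url, Map<String, ByteIterator> values) {
    System.out.println("POST " + url);
    return Status.OK;
  }
  @Override
  public Status update(String table, String url, Map<String, ByteIterator> values) {
    System.out.println("PUT " + url);
    return Status.OK;
  }
  @Override
  public Status delete(String table, String url) {
    System.out.println("DELETE " + url);
    return Status.OK;
  }
  @Override
  public Status scan(String table, String startkey, int recordcount, Set<String> fields,
                     Vector<HashMap<String, ByteIterator>> result) {
    return Status.NOT_IMPLEMENTED; // scans are not part of RestWorkload's mix
  }
}
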
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | doTransactionInsert | class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
   * In web services, even though the CRUD operations follow the same request
   * distribution, they have different traces and distribution parameter
   * values. Configuring the parameters of these operations separately
   * therefore makes the benchmark more flexible and capable of generating
   * more realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
  // Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
    // Delete operations need special handling here because CoreWorkload does not support them.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
    // Needs special handling for the Zipfian distribution to support a configurable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
   * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
   * Returns the next URL to be called for the given operation type
   * (1 = read, 2 = insert, 3 = delete, 4 = update).
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {<FILL_FUNCTION_BODY>}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
|
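// --- Sketch of how the insert payload above is sized: the field-length
// generator yields a byte count and RandomByteIterator produces that many
// random bytes. A ConstantIntegerGenerator stands in for whatever generator
// getFieldLengthGenerator would actually return.
import site.ycsb.ByteIterator;
import site.ycsb.RandomByteIterator;
import site.ycsb.generator.ConstantIntegerGenerator;
import site.ycsb.generator.NumberGenerator;

public final class PayloadSketch {
  public static void main(String[] args) {
    NumberGenerator fieldlengthgenerator = new ConstantIntegerGenerator(128);
    ByteIterator data = new RandomByteIterator(fieldlengthgenerator.nextValue().longValue());
    System.out.println("payload bytes: " + data.bytesLeft()); // 128
  }
}
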
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | doTransactionDelete | class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
   * In web services, even though the CRUD operations follow the same request
   * distribution, they have different traces and distribution parameter
   * values. Configuring the parameters of these operations separately
   * therefore makes the benchmark more flexible and capable of generating
   * more realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
  // Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
    // Delete operations need special handling here because CoreWorkload does not support them.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
    // Needs special handling for the Zipfian distribution to support a configurable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
/**
   * Not required for REST clients, as data population is service-specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
   * Returns the next URL to be called for the given operation type
   * (1 = read, 2 = insert, 3 = delete, 4 = update).
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {<FILL_FUNCTION_BODY>}
@Override
public void doTransactionUpdate(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
}
} |
db.delete(null, getNextURL(3));
|
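// --- Illustrative variant of the getTrace method above using
// try-with-resources, so the reader is closed even if readLine throws
// mid-file; it builds the same URL map keyed by line index.
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public final class TraceLoaderSketch {
  static Map<Integer, String> load(String filePath, int recordCount) throws IOException {
    Map<Integer, String> urlMap = new HashMap<>();
    try (BufferedReader reader = new BufferedReader(new FileReader(filePath))) {
      String line;
      int count = 0;
      while (count < recordCount && (line = reader.readLine()) != null) {
        urlMap.put(count++, line.trim());
      }
    }
    return urlMap;
  }
}
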
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/RestWorkload.java | RestWorkload | doTransactionUpdate | class RestWorkload extends CoreWorkload {
/**
* The name of the property for the proportion of transactions that are
* delete.
*/
public static final String DELETE_PROPORTION_PROPERTY = "deleteproportion";
/**
* The default proportion of transactions that are delete.
*/
public static final String DELETE_PROPORTION_PROPERTY_DEFAULT = "0.00";
/**
* The name of the property for the file that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY = "fieldlengthdistfile";
/**
* The default file name that holds the field length size for insert operations.
*/
public static final String FIELD_LENGTH_DISTRIBUTION_FILE_PROPERTY_DEFAULT = "fieldLengthDistFile.txt";
/**
   * In web services, even though the CRUD operations follow the same request
   * distribution, they have different traces and distribution parameter
   * values. Configuring the parameters of these operations separately
   * therefore makes the benchmark more flexible and capable of generating
   * more realistic workloads.
*/
// Read related properties.
private static final String READ_TRACE_FILE = "url.trace.read";
private static final String READ_TRACE_FILE_DEFAULT = "readtrace.txt";
private static final String READ_ZIPFIAN_CONSTANT = "readzipfconstant";
private static final String READ_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String READ_RECORD_COUNT_PROPERTY = "readrecordcount";
// Insert related properties.
private static final String INSERT_TRACE_FILE = "url.trace.insert";
private static final String INSERT_TRACE_FILE_DEFAULT = "inserttrace.txt";
private static final String INSERT_ZIPFIAN_CONSTANT = "insertzipfconstant";
private static final String INSERT_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT = "insertsizezipfconstant";
private static final String INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String INSERT_RECORD_COUNT_PROPERTY = "insertrecordcount";
// Delete related properties.
private static final String DELETE_TRACE_FILE = "url.trace.delete";
private static final String DELETE_TRACE_FILE_DEFAULT = "deletetrace.txt";
private static final String DELETE_ZIPFIAN_CONSTANT = "deletezipfconstant";
private static final String DELETE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String DELETE_RECORD_COUNT_PROPERTY = "deleterecordcount";
  // Update related properties.
private static final String UPDATE_TRACE_FILE = "url.trace.update";
private static final String UPDATE_TRACE_FILE_DEFAULT = "updatetrace.txt";
private static final String UPDATE_ZIPFIAN_CONSTANT = "updatezipfconstant";
private static final String UPDATE_ZIPFIAN_CONSTANT_DEAFULT = "0.99";
private static final String UPDATE_RECORD_COUNT_PROPERTY = "updaterecordcount";
private Map<Integer, String> readUrlMap;
private Map<Integer, String> insertUrlMap;
private Map<Integer, String> deleteUrlMap;
private Map<Integer, String> updateUrlMap;
private int readRecordCount;
private int insertRecordCount;
private int deleteRecordCount;
private int updateRecordCount;
private NumberGenerator readKeyChooser;
private NumberGenerator insertKeyChooser;
private NumberGenerator deleteKeyChooser;
private NumberGenerator updateKeyChooser;
private NumberGenerator fieldlengthgenerator;
private DiscreteGenerator operationchooser;
@Override
public void init(Properties p) throws WorkloadException {
readRecordCount = Integer.parseInt(p.getProperty(READ_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
insertRecordCount = Integer
.parseInt(p.getProperty(INSERT_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
deleteRecordCount = Integer
.parseInt(p.getProperty(DELETE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
updateRecordCount = Integer
.parseInt(p.getProperty(UPDATE_RECORD_COUNT_PROPERTY, String.valueOf(Integer.MAX_VALUE)));
readUrlMap = getTrace(p.getProperty(READ_TRACE_FILE, READ_TRACE_FILE_DEFAULT), readRecordCount);
insertUrlMap = getTrace(p.getProperty(INSERT_TRACE_FILE, INSERT_TRACE_FILE_DEFAULT), insertRecordCount);
deleteUrlMap = getTrace(p.getProperty(DELETE_TRACE_FILE, DELETE_TRACE_FILE_DEFAULT), deleteRecordCount);
updateUrlMap = getTrace(p.getProperty(UPDATE_TRACE_FILE, UPDATE_TRACE_FILE_DEFAULT), updateRecordCount);
operationchooser = createOperationGenerator(p);
// Common distribution for all operations.
String requestDistrib = p.getProperty(REQUEST_DISTRIBUTION_PROPERTY, REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
double readZipfconstant = Double.parseDouble(p.getProperty(READ_ZIPFIAN_CONSTANT, READ_ZIPFIAN_CONSTANT_DEAFULT));
readKeyChooser = getKeyChooser(requestDistrib, readUrlMap.size(), readZipfconstant, p);
double updateZipfconstant = Double
.parseDouble(p.getProperty(UPDATE_ZIPFIAN_CONSTANT, UPDATE_ZIPFIAN_CONSTANT_DEAFULT));
updateKeyChooser = getKeyChooser(requestDistrib, updateUrlMap.size(), updateZipfconstant, p);
double insertZipfconstant = Double
.parseDouble(p.getProperty(INSERT_ZIPFIAN_CONSTANT, INSERT_ZIPFIAN_CONSTANT_DEAFULT));
insertKeyChooser = getKeyChooser(requestDistrib, insertUrlMap.size(), insertZipfconstant, p);
double deleteZipfconstant = Double
.parseDouble(p.getProperty(DELETE_ZIPFIAN_CONSTANT, DELETE_ZIPFIAN_CONSTANT_DEAFULT));
deleteKeyChooser = getKeyChooser(requestDistrib, deleteUrlMap.size(), deleteZipfconstant, p);
fieldlengthgenerator = getFieldLengthGenerator(p);
}
public static DiscreteGenerator createOperationGenerator(final Properties p) {
// Re-using CoreWorkload method.
final DiscreteGenerator operationChooser = CoreWorkload.createOperationGenerator(p);
    // Delete operations need special handling here because CoreWorkload does not support them.
double deleteproportion = Double
.parseDouble(p.getProperty(DELETE_PROPORTION_PROPERTY, DELETE_PROPORTION_PROPERTY_DEFAULT));
if (deleteproportion > 0) {
operationChooser.addValue(deleteproportion, "DELETE");
}
return operationChooser;
}
private static NumberGenerator getKeyChooser(String requestDistrib, int recordCount, double zipfContant,
Properties p) throws WorkloadException {
NumberGenerator keychooser;
switch (requestDistrib) {
case "exponential":
double percentile = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_PERCENTILE_PROPERTY,
ExponentialGenerator.EXPONENTIAL_PERCENTILE_DEFAULT));
double frac = Double.parseDouble(p.getProperty(ExponentialGenerator.EXPONENTIAL_FRAC_PROPERTY,
ExponentialGenerator.EXPONENTIAL_FRAC_DEFAULT));
keychooser = new ExponentialGenerator(percentile, recordCount * frac);
break;
case "uniform":
keychooser = new UniformLongGenerator(0, recordCount - 1);
break;
case "zipfian":
keychooser = new ZipfianGenerator(recordCount, zipfContant);
break;
case "latest":
throw new WorkloadException("Latest request distribution is not supported for RestWorkload.");
case "hotspot":
double hotsetfraction = Double.parseDouble(p.getProperty(HOTSPOT_DATA_FRACTION, HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction = Double.parseDouble(p.getProperty(HOTSPOT_OPN_FRACTION, HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, recordCount - 1, hotsetfraction, hotopnfraction);
break;
default:
throw new WorkloadException("Unknown request distribution \"" + requestDistrib + "\"");
}
return keychooser;
}
protected static NumberGenerator getFieldLengthGenerator(Properties p) throws WorkloadException {
// Re-using CoreWorkload method.
NumberGenerator fieldLengthGenerator = CoreWorkload.getFieldLengthGenerator(p);
String fieldlengthdistribution = p.getProperty(FIELD_LENGTH_DISTRIBUTION_PROPERTY,
FIELD_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
    // Needs special handling for the Zipfian distribution to support a configurable Zipf constant.
if (fieldlengthdistribution.compareTo("zipfian") == 0) {
int fieldlength = Integer.parseInt(p.getProperty(FIELD_LENGTH_PROPERTY, FIELD_LENGTH_PROPERTY_DEFAULT));
double insertsizezipfconstant = Double
.parseDouble(p.getProperty(INSERT_SIZE_ZIPFIAN_CONSTANT, INSERT_SIZE_ZIPFIAN_CONSTANT_DEAFULT));
fieldLengthGenerator = new ZipfianGenerator(1, fieldlength, insertsizezipfconstant);
}
return fieldLengthGenerator;
}
/**
* Reads the trace file and returns a URL map.
*/
private static Map<Integer, String> getTrace(String filePath, int recordCount)
throws WorkloadException {
Map<Integer, String> urlMap = new HashMap<Integer, String>();
int count = 0;
String line;
try {
FileReader inputFile = new FileReader(filePath);
BufferedReader bufferReader = new BufferedReader(inputFile);
while ((line = bufferReader.readLine()) != null) {
urlMap.put(count++, line.trim());
if (count >= recordCount) {
break;
}
}
bufferReader.close();
} catch (IOException e) {
throw new WorkloadException(
"Error while reading the trace. Please make sure the trace file path is correct. "
+ e.getLocalizedMessage());
}
return urlMap;
}
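  // Example (assumed trace file contents): given a file with one URL per line,
  //   user/1
  //   user/2
  //   user/3
  // and recordCount >= 3, the map above is {0="user/1", 1="user/2", 2="user/3"};
  // with recordCount = 2 only the first two lines are retained.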
/**
* Not required for Rest Clients as data population is service specific.
*/
@Override
public boolean doInsert(DB db, Object threadstate) {
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
String operation = operationchooser.nextString();
if (operation == null) {
return false;
}
switch (operation) {
case "UPDATE":
doTransactionUpdate(db);
break;
case "INSERT":
doTransactionInsert(db);
break;
case "DELETE":
doTransactionDelete(db);
break;
default:
doTransactionRead(db);
}
return true;
}
/**
* Returns next URL to be called.
*/
private String getNextURL(int opType) {
if (opType == 1) {
return readUrlMap.get(readKeyChooser.nextValue().intValue());
} else if (opType == 2) {
return insertUrlMap.get(insertKeyChooser.nextValue().intValue());
} else if (opType == 3) {
return deleteUrlMap.get(deleteKeyChooser.nextValue().intValue());
} else {
return updateUrlMap.get(updateKeyChooser.nextValue().intValue());
}
}
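  // The opType codes above are internal to this class:
  //   1 -> read URL, 2 -> insert URL, 3 -> delete URL, any other value -> update URL.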
@Override
public void doTransactionRead(DB db) {
HashMap<String, ByteIterator> result = new HashMap<String, ByteIterator>();
db.read(null, getNextURL(1), null, result);
}
@Override
public void doTransactionInsert(DB db) {
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of insert data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.insert(null, getNextURL(2), value);
}
public void doTransactionDelete(DB db) {
db.delete(null, getNextURL(3));
}
@Override
public void doTransactionUpdate(DB db) {<FILL_FUNCTION_BODY>}
} |
HashMap<String, ByteIterator> value = new HashMap<String, ByteIterator>();
// Create random bytes of update data with a specific size.
value.put("data", new RandomByteIterator(fieldlengthgenerator.nextValue().longValue()));
db.update(null, getNextURL(4), value);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/TimeSeriesWorkload.java | TimeSeriesWorkload | fromString | class TimeSeriesWorkload extends Workload {
/**
* The types of values written to the timeseries store.
*/
public enum ValueType {
INTEGERS("integers"),
FLOATS("floats"),
MIXED("mixednumbers");
protected final String name;
ValueType(final String name) {
this.name = name;
}
public static ValueType fromString(final String name) {<FILL_FUNCTION_BODY>}
}
/** Name and default value for the timestamp key property. */
public static final String TIMESTAMP_KEY_PROPERTY = "timestampkey";
public static final String TIMESTAMP_KEY_PROPERTY_DEFAULT = "YCSBTS";
/** Name and default value for the value key property. */
public static final String VALUE_KEY_PROPERTY = "valuekey";
public static final String VALUE_KEY_PROPERTY_DEFAULT = "YCSBV";
/** Name and default value for the timestamp interval property. */
public static final String TIMESTAMP_INTERVAL_PROPERTY = "timestampinterval";
public static final String TIMESTAMP_INTERVAL_PROPERTY_DEFAULT = "60";
/** Name and default value for the timestamp units property. */
public static final String TIMESTAMP_UNITS_PROPERTY = "timestampunits";
public static final String TIMESTAMP_UNITS_PROPERTY_DEFAULT = "SECONDS";
/** Name and default value for the number of tags property. */
public static final String TAG_COUNT_PROPERTY = "tagcount";
public static final String TAG_COUNT_PROPERTY_DEFAULT = "4";
/** Name and default value for the tag value cardinality map property. */
public static final String TAG_CARDINALITY_PROPERTY = "tagcardinality";
public static final String TAG_CARDINALITY_PROPERTY_DEFAULT = "1, 2, 4, 8";
/** Name and default value for the tag key length property. */
public static final String TAG_KEY_LENGTH_PROPERTY = "tagkeylength";
public static final String TAG_KEY_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag value length property. */
public static final String TAG_VALUE_LENGTH_PROPERTY = "tagvaluelength";
public static final String TAG_VALUE_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag pair delimiter property. */
public static final String PAIR_DELIMITER_PROPERTY = "tagpairdelimiter";
public static final String PAIR_DELIMITER_PROPERTY_DEFAULT = "=";
/** Name and default value for the delete string delimiter property. */
public static final String DELETE_DELIMITER_PROPERTY = "deletedelimiter";
public static final String DELETE_DELIMITER_PROPERTY_DEFAULT = ":";
/** Name and default value for the random timestamp write order property. */
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY = "randomwritetimestamporder";
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT = "false";
/** Name and default value for the random time series write order property. */
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY = "randomtimeseriesorder";
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT = "true";
/** Name and default value for the value types property. */
public static final String VALUE_TYPE_PROPERTY = "valuetype";
public static final String VALUE_TYPE_PROPERTY_DEFAULT = "floats";
/** Name and default value for the sparsity property. */
public static final String SPARSITY_PROPERTY = "sparsity";
public static final String SPARSITY_PROPERTY_DEFAULT = "0.00";
/** Name and default value for the delayed series percentage property. */
public static final String DELAYED_SERIES_PROPERTY = "delayedseries";
public static final String DELAYED_SERIES_PROPERTY_DEFAULT = "0.10";
/** Name and default value for the delayed series intervals property. */
public static final String DELAYED_INTERVALS_PROPERTY = "delayedintervals";
public static final String DELAYED_INTERVALS_PROPERTY_DEFAULT = "5";
/** Name and default value for the query time span property. */
public static final String QUERY_TIMESPAN_PROPERTY = "querytimespan";
public static final String QUERY_TIMESPAN_PROPERTY_DEFAULT = "0";
/** Name and default value for the randomized query time span property. */
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY = "queryrandomtimespan";
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT = "false";
/** Name and default value for the query time stamp delimiter property. */
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY = "querytimespandelimiter";
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT = ",";
/** Name and default value for the group-by key property. */
public static final String GROUPBY_KEY_PROPERTY = "groupbykey";
public static final String GROUPBY_KEY_PROPERTY_DEFAULT = "YCSBGB";
/** Name and default value for the group-by function property. */
public static final String GROUPBY_PROPERTY = "groupbyfunction";
/** Name and default value for the group-by key map property. */
public static final String GROUPBY_KEYS_PROPERTY = "groupbykeys";
/** Name and default value for the downsampling key property. */
public static final String DOWNSAMPLING_KEY_PROPERTY = "downsamplingkey";
public static final String DOWNSAMPLING_KEY_PROPERTY_DEFAULT = "YCSBDS";
/** Name and default value for the downsampling function property. */
public static final String DOWNSAMPLING_FUNCTION_PROPERTY = "downsamplingfunction";
/** Name and default value for the downsampling interval property. */
public static final String DOWNSAMPLING_INTERVAL_PROPERTY = "downsamplinginterval";
/** The properties to pull settings from. */
protected Properties properties;
/** Generators for keys, tag keys and tag values. */
protected Generator<String> keyGenerator;
protected Generator<String> tagKeyGenerator;
protected Generator<String> tagValueGenerator;
/** The timestamp key, defaults to "YCSBTS". */
protected String timestampKey;
  /** The value key, defaults to "YCSBV". */
protected String valueKey;
/** The number of time units in between timestamps. */
protected int timestampInterval;
/** The units of time the timestamp and various intervals represent. */
protected TimeUnit timeUnits;
/** Whether or not to randomize the timestamp order when writing. */
protected boolean randomizeTimestampOrder;
/** Whether or not to randomize (shuffle) the time series order. NOT compatible
* with data integrity. */
protected boolean randomizeTimeseriesOrder;
/** The type of values to generate when writing data. */
protected ValueType valueType;
/** Used to calculate an offset for each time series. */
protected int[] cumulativeCardinality;
/** The calculated total cardinality based on the config. */
protected int totalCardinality;
/** The calculated per-time-series-key cardinality. I.e. the number of unique
* tag key and value combinations. */
protected int perKeyCardinality;
/** How much data to scan for in each call. */
protected NumberGenerator scanlength;
/** A generator used to select a random time series key per read/scan. */
protected NumberGenerator keychooser;
/** A generator to select what operation to perform during the run phase. */
protected DiscreteGenerator operationchooser;
/** The maximum number of interval offsets from the starting timestamp. Calculated
* based on the number of records configured for the run. */
protected int maxOffsets;
/** The number of records or operations to perform for this run. */
protected int recordcount;
/** The number of tag pairs per time series. */
protected int tagPairs;
/** The table we'll write to. */
protected String table;
/** How many time series keys will be generated. */
protected int numKeys;
/** The generated list of possible time series key values. */
protected String[] keys;
/** The generated list of possible tag key values. */
protected String[] tagKeys;
/** The generated list of possible tag value values. */
protected String[] tagValues;
/** The cardinality for each tag key. */
protected int[] tagCardinality;
/** A helper to skip non-incrementing tag values. */
protected int firstIncrementableCardinality;
/** How sparse the data written should be. */
protected double sparsity;
/** The percentage of time series that should be delayed in writes. */
protected double delayedSeries;
/** The maximum number of intervals to delay a series. */
protected int delayedIntervals;
/** Optional query time interval during reads/scans. */
protected int queryTimeSpan;
/** Whether or not the actual interval should be randomly chosen, using
* queryTimeSpan as the maximum value. */
protected boolean queryRandomTimeSpan;
/** The delimiter for tag pairs in fields. */
protected String tagPairDelimiter;
/** The delimiter between parameters for the delete key. */
protected String deleteDelimiter;
/** The delimiter between timestamps for query time spans. */
protected String queryTimeSpanDelimiter;
/** Whether or not to issue group-by queries. */
protected boolean groupBy;
/** The key used for group-by tag keys. */
protected String groupByKey;
/** The function used for group-by's. */
protected String groupByFunction;
/** The tag keys to group on. */
protected boolean[] groupBys;
/** Whether or not to issue downsampling queries. */
protected boolean downsample;
/** The key used for downsampling tag keys. */
protected String downsampleKey;
/** The downsampling function. */
protected String downsampleFunction;
/** The downsampling interval. */
protected int downsampleInterval;
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
protected boolean dataintegrity;
/** Measurements to write data integrity results to. */
protected Measurements measurements = Measurements.getMeasurements();
@Override
public void init(final Properties p) throws WorkloadException {
properties = p;
recordcount =
Integer.parseInt(p.getProperty(Client.RECORD_COUNT_PROPERTY,
Client.DEFAULT_RECORD_COUNT));
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
timestampKey = p.getProperty(TIMESTAMP_KEY_PROPERTY, TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = p.getProperty(VALUE_KEY_PROPERTY, VALUE_KEY_PROPERTY_DEFAULT);
operationchooser = CoreWorkload.createOperationGenerator(properties);
final int maxscanlength =
Integer.parseInt(p.getProperty(CoreWorkload.MAX_SCAN_LENGTH_PROPERTY,
CoreWorkload.MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY,
CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(1, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(1, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
randomizeTimestampOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY,
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT));
randomizeTimeseriesOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESERIES_ORDER_PROPERTY,
RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT));
// setup the cardinality
numKeys = Integer.parseInt(p.getProperty(CoreWorkload.FIELD_COUNT_PROPERTY,
CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT));
tagPairs = Integer.parseInt(p.getProperty(TAG_COUNT_PROPERTY,
TAG_COUNT_PROPERTY_DEFAULT));
sparsity = Double.parseDouble(p.getProperty(SPARSITY_PROPERTY, SPARSITY_PROPERTY_DEFAULT));
tagCardinality = new int[tagPairs];
final String requestdistrib =
p.getProperty(CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY,
CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
keychooser = new ScrambledZipfianGenerator(0, numKeys - 1);
//} else if (requestdistrib.compareTo("latest") == 0) {
// keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_DATA_FRACTION,
CoreWorkload.HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_OPN_FRACTION,
CoreWorkload.HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, numKeys - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
// figure out the start timestamp based on the units, cardinality and interval
try {
timestampInterval = Integer.parseInt(p.getProperty(
TIMESTAMP_INTERVAL_PROPERTY, TIMESTAMP_INTERVAL_PROPERTY_DEFAULT));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
TIMESTAMP_INTERVAL_PROPERTY, nfe);
}
try {
timeUnits = TimeUnit.valueOf(p.getProperty(TIMESTAMP_UNITS_PROPERTY,
TIMESTAMP_UNITS_PROPERTY_DEFAULT).toUpperCase());
} catch (IllegalArgumentException e) {
throw new WorkloadException("Unknown time unit type", e);
}
if (timeUnits == TimeUnit.NANOSECONDS || timeUnits == TimeUnit.MICROSECONDS) {
throw new WorkloadException("YCSB doesn't support " + timeUnits +
" at this time.");
}
tagPairDelimiter = p.getProperty(PAIR_DELIMITER_PROPERTY, PAIR_DELIMITER_PROPERTY_DEFAULT);
deleteDelimiter = p.getProperty(DELETE_DELIMITER_PROPERTY, DELETE_DELIMITER_PROPERTY_DEFAULT);
dataintegrity = Boolean.parseBoolean(
p.getProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY,
CoreWorkload.DATA_INTEGRITY_PROPERTY_DEFAULT));
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
queryTimeSpan = Integer.parseInt(p.getProperty(QUERY_TIMESPAN_PROPERTY,
QUERY_TIMESPAN_PROPERTY_DEFAULT));
queryRandomTimeSpan = Boolean.parseBoolean(p.getProperty(QUERY_RANDOM_TIMESPAN_PROPERTY,
QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT));
queryTimeSpanDelimiter = p.getProperty(QUERY_TIMESPAN_DELIMITER_PROPERTY,
QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
groupByKey = p.getProperty(GROUPBY_KEY_PROPERTY, GROUPBY_KEY_PROPERTY_DEFAULT);
groupByFunction = p.getProperty(GROUPBY_PROPERTY);
if (groupByFunction != null && !groupByFunction.isEmpty()) {
final String groupByKeys = p.getProperty(GROUPBY_KEYS_PROPERTY);
if (groupByKeys == null || groupByKeys.isEmpty()) {
throw new WorkloadException("Group by was enabled but no keys were specified.");
}
final String[] gbKeys = groupByKeys.split(",");
      // tagKeys is not generated until initKeysAndTags() runs below, so validate
      // against the configured tag pair count instead.
      if (gbKeys.length != tagPairs) {
        throw new WorkloadException("Only " + gbKeys.length + " group by keys "
            + "were specified but there were " + tagPairs + " tag keys given.");
}
groupBys = new boolean[gbKeys.length];
for (int i = 0; i < gbKeys.length; i++) {
        groupBys[i] = Integer.parseInt(gbKeys[i].trim()) != 0;
}
groupBy = true;
}
downsampleKey = p.getProperty(DOWNSAMPLING_KEY_PROPERTY, DOWNSAMPLING_KEY_PROPERTY_DEFAULT);
downsampleFunction = p.getProperty(DOWNSAMPLING_FUNCTION_PROPERTY);
if (downsampleFunction != null && !downsampleFunction.isEmpty()) {
final String interval = p.getProperty(DOWNSAMPLING_INTERVAL_PROPERTY);
if (interval == null || interval.isEmpty()) {
throw new WorkloadException("'" + DOWNSAMPLING_INTERVAL_PROPERTY + "' was missing despite '"
+ DOWNSAMPLING_FUNCTION_PROPERTY + "' being set.");
}
downsampleInterval = Integer.parseInt(interval);
downsample = true;
}
delayedSeries = Double.parseDouble(p.getProperty(DELAYED_SERIES_PROPERTY, DELAYED_SERIES_PROPERTY_DEFAULT));
delayedIntervals = Integer.parseInt(p.getProperty(DELAYED_INTERVALS_PROPERTY, DELAYED_INTERVALS_PROPERTY_DEFAULT));
valueType = ValueType.fromString(p.getProperty(VALUE_TYPE_PROPERTY, VALUE_TYPE_PROPERTY_DEFAULT));
table = p.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
initKeysAndTags();
validateSettings();
}
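  // Minimal sketch of a configuration this init() accepts (illustrative values,
  // not requirements):
  //   recordcount=1000
  //   fieldcount=16          (number of time series keys)
  //   tagcount=4
  //   tagcardinality=1,2,4,8
  //   timestampinterval=60
  //   timestampunits=SECONDS
  // With these, totalCardinality = 16 * 1 * 2 * 4 * 8 = 1024 distinct series.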
@Override
public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {
if (properties == null) {
throw new WorkloadException("Workload has not been initialized.");
}
return new ThreadState(mythreadid, threadcount);
}
@Override
public boolean doInsert(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, true);
if (db.insert(table, key, tags) == Status.OK) {
return true;
}
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
switch (operationchooser.nextString()) {
case "READ":
doTransactionRead(db, threadstate);
break;
case "UPDATE":
doTransactionUpdate(db, threadstate);
break;
case "INSERT":
doTransactionInsert(db, threadstate);
break;
case "SCAN":
doTransactionScan(db, threadstate);
break;
case "DELETE":
doTransactionDelete(db, threadstate);
break;
default:
return false;
}
return true;
}
protected void doTransactionRead(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final String keyname = keys[keychooser.nextValue().intValue()];
final Random random = ThreadLocalRandom.current();
int offsets = state.queryOffsetGenerator.nextValue().intValue();
//int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // randomly chosen tags
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + downsampleInterval);
}
final Map<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
final Status status = db.read(table, keyname, fields, cells);
if (dataintegrity && status == Status.OK) {
verifyRow(keyname, cells);
}
}
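  // Shape of the fields set built above (tag strings are illustrative): with the
  // default "=" pair delimiter and "," time span delimiter, a read might pass
  //   { "AA=AB", "AB=AC", "YCSBTS=1483228800" }
  // or, when querytimespan > 0,
  //   { "AA=AB", "AB=AC", "YCSBTS=1483228800,1483229400" }
  // leaving the DB binding to parse tag filters and the time range back out.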
protected void doTransactionUpdate(final DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, false);
db.update(table, key, tags);
}
protected void doTransactionInsert(final DB db, Object threadstate) {
doInsert(db, threadstate);
}
protected void doTransactionScan(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final String keyname = keys[random.nextInt(keys.length)];
// choose a random scan length
int len = scanlength.nextValue().intValue();
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // randomly chosen tags
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + tagPairDelimiter + downsampleInterval);
}
final Vector<HashMap<String, ByteIterator>> results = new Vector<HashMap<String, ByteIterator>>();
db.scan(table, keyname, len, fields, results);
}
protected void doTransactionDelete(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final StringBuilder buf = new StringBuilder().append(keys[random.nextInt(keys.length)]);
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // randomly chosen tags
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
buf.append(deleteDelimiter)
.append(tagKeys[i]);
} else {
buf.append(deleteDelimiter).append(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp);
}
db.delete(table, buf.toString());
}
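  // Shape of the delete key built above (values are illustrative): with the
  // default ":" delete delimiter and "=" pair delimiter, db.delete() receives a
  // single string such as
  //   "AA:AB=AC:AD=AE:YCSBTS=1483228800"
  // i.e. the series key, each tag filter, and the timestamp or time range,
  // joined by the delete delimiter.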
/**
   * Parses the values returned by a read or scan operation and determines whether
   * the integer value matches the hash computed from the original key, tags and timestamp.
* Only works for raw data points, will not work for group-by's or downsampled data.
* @param key The time series key.
* @param cells The cells read by the DB.
* @return {@link Status#OK} if the data matched or {@link Status#UNEXPECTED_STATE} if
* the data did not match.
*/
protected Status verifyRow(final String key, final Map<String, ByteIterator> cells) {
Status verifyStatus = Status.UNEXPECTED_STATE;
long startTime = System.nanoTime();
double value = 0;
long timestamp = 0;
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
for (final Entry<String, ByteIterator> entry : cells.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
timestamp = it.getLong();
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
value = it.isFloatingPoint() ? it.getDouble() : it.getLong();
} else {
validationTags.put(entry.getKey(), entry.getValue().toString());
}
}
if (validationFunction(key, timestamp, validationTags) == value) {
verifyStatus = Status.OK;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
return verifyStatus;
}
/**
* Function used for generating a deterministic hash based on the combination
* of metric, tags and timestamp.
* @param key A non-null string representing the key.
* @param timestamp A timestamp in the proper units for the workload.
* @param tags A non-null map of tag keys and values NOT including the YCSB
* key or timestamp.
* @return A hash value as an 8 byte integer.
*/
protected long validationFunction(final String key, final long timestamp,
final TreeMap<String, String> tags) {
final StringBuilder validationBuffer = new StringBuilder(keys[0].length() +
(tagPairs * tagKeys[0].length()) + (tagPairs * tagCardinality[1]));
for (final Entry<String, String> pair : tags.entrySet()) {
validationBuffer.append(pair.getKey()).append(pair.getValue());
}
return (long) validationBuffer.toString().hashCode() ^ timestamp;
}
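  // Worked form of the validation value (symbolic, since String.hashCode()
  // depends on the generated tag strings): for a row with tags {k1=v1, k2=v2}
  // written at time t, the stored value is
  //   ("k1" + "v1" + "k2" + "v2").hashCode() ^ t
  // and verifyRow() recomputes the same expression from the cells it read back.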
/**
* Breaks out the keys, tags and cardinality initialization in another method
* to keep CheckStyle happy.
* @throws WorkloadException If something goes pear shaped.
*/
protected void initKeysAndTags() throws WorkloadException {
final int keyLength = Integer.parseInt(properties.getProperty(
CoreWorkload.FIELD_LENGTH_PROPERTY,
CoreWorkload.FIELD_LENGTH_PROPERTY_DEFAULT));
final int tagKeyLength = Integer.parseInt(properties.getProperty(
TAG_KEY_LENGTH_PROPERTY, TAG_KEY_LENGTH_PROPERTY_DEFAULT));
final int tagValueLength = Integer.parseInt(properties.getProperty(
TAG_VALUE_LENGTH_PROPERTY, TAG_VALUE_LENGTH_PROPERTY_DEFAULT));
keyGenerator = new IncrementingPrintableStringGenerator(keyLength);
tagKeyGenerator = new IncrementingPrintableStringGenerator(tagKeyLength);
tagValueGenerator = new IncrementingPrintableStringGenerator(tagValueLength);
final int threads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1"));
final String tagCardinalityString = properties.getProperty(
TAG_CARDINALITY_PROPERTY,
TAG_CARDINALITY_PROPERTY_DEFAULT);
final String[] tagCardinalityParts = tagCardinalityString.split(",");
int idx = 0;
totalCardinality = numKeys;
perKeyCardinality = 1;
int maxCardinality = 0;
for (final String card : tagCardinalityParts) {
try {
tagCardinality[idx] = Integer.parseInt(card.trim());
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse cardinality: " +
card, nfe);
}
if (tagCardinality[idx] < 1) {
throw new WorkloadException("Cardinality must be greater than zero: " +
tagCardinality[idx]);
}
totalCardinality *= tagCardinality[idx];
perKeyCardinality *= tagCardinality[idx];
if (tagCardinality[idx] > maxCardinality) {
maxCardinality = tagCardinality[idx];
}
++idx;
if (idx >= tagPairs) {
// we have more cardinalities than tag keys so bail at this point.
break;
}
}
if (numKeys < threads) {
throw new WorkloadException("Field count " + numKeys + " (keys for time "
+ "series workloads) must be greater or equal to the number of "
+ "threads " + threads);
}
    // fill all remaining tags without an explicit cardinality with 1 (an 'if'
    // here would only fill the first remaining slot, leaving the rest at 0).
    while (idx < tagPairs) {
      tagCardinality[idx++] = 1;
    }
for (int i = 0; i < tagCardinality.length; ++i) {
if (tagCardinality[i] > 1) {
firstIncrementableCardinality = i;
break;
}
}
keys = new String[numKeys];
tagKeys = new String[tagPairs];
tagValues = new String[maxCardinality];
for (int i = 0; i < numKeys; ++i) {
keys[i] = keyGenerator.nextString();
}
for (int i = 0; i < tagPairs; ++i) {
tagKeys[i] = tagKeyGenerator.nextString();
}
for (int i = 0; i < maxCardinality; i++) {
tagValues[i] = tagValueGenerator.nextString();
}
if (randomizeTimeseriesOrder) {
Utils.shuffleArray(keys);
Utils.shuffleArray(tagValues);
}
maxOffsets = (recordcount / totalCardinality) + 1;
final int[] keyAndTagCardinality = new int[tagPairs + 1];
keyAndTagCardinality[0] = numKeys;
for (int i = 0; i < tagPairs; i++) {
keyAndTagCardinality[i + 1] = tagCardinality[i];
}
cumulativeCardinality = new int[keyAndTagCardinality.length];
for (int i = 0; i < keyAndTagCardinality.length; i++) {
int cumulation = 1;
for (int x = i; x <= keyAndTagCardinality.length - 1; x++) {
cumulation *= keyAndTagCardinality[x];
}
if (i > 0) {
cumulativeCardinality[i - 1] = cumulation;
}
}
cumulativeCardinality[cumulativeCardinality.length - 1] = 1;
}
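  // Worked example of the cumulative cardinality array (assuming numKeys = 2 and
  // the default tagcardinality "1,2,4,8"): keyAndTagCardinality = [2, 1, 2, 4, 8]
  // and the loops above yield
  //   cumulativeCardinality = [64, 64, 32, 8, 1]
  // where entry i is the product of the cardinalities after position i, so entry
  // 0 (1 * 2 * 4 * 8 = 64) is the number of series under each key.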
/**
* Makes sure the settings as given are compatible.
* @throws WorkloadException If one or more settings were invalid.
*/
protected void validateSettings() throws WorkloadException {
if (dataintegrity) {
if (valueType != ValueType.INTEGERS) {
throw new WorkloadException("Data integrity was enabled. 'valuetype' must "
+ "be set to 'integers'.");
}
if (groupBy) {
throw new WorkloadException("Data integrity was enabled. 'groupbyfunction' must "
+ "be empty or null.");
}
if (downsample) {
throw new WorkloadException("Data integrity was enabled. 'downsamplingfunction' must "
+ "be empty or null.");
}
if (queryTimeSpan > 0) {
throw new WorkloadException("Data integrity was enabled. 'querytimespan' must "
+ "be empty or 0.");
}
if (randomizeTimeseriesOrder) {
throw new WorkloadException("Data integrity was enabled. 'randomizetimeseriesorder' must "
+ "be false.");
}
final String startTimestamp = properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startTimestamp == null || startTimestamp.isEmpty()) {
throw new WorkloadException("Data integrity was enabled. 'insertstart' must "
+ "be set to a Unix Epoch timestamp.");
}
}
}
/**
* Thread state class holding thread local generators and indices.
*/
protected class ThreadState {
/** The timestamp generator for this thread. */
protected final UnixEpochTimestampGenerator timestampGenerator;
/** An offset generator to select a random offset for queries. */
protected final NumberGenerator queryOffsetGenerator;
/** The current write key index. */
protected int keyIdx;
/** The starting fence for writing keys. */
protected int keyIdxStart;
/** The ending fence for writing keys. */
protected int keyIdxEnd;
/** Indices for each tag value for writes. */
protected int[] tagValueIdxs;
/** Whether or not all time series have written values for the current timestamp. */
protected boolean rollover;
/** The starting timestamp. */
protected long startTimestamp;
/**
* Default ctor.
* @param threadID The zero based thread ID.
* @param threadCount The total number of threads.
* @throws WorkloadException If something went pear shaped.
*/
protected ThreadState(final int threadID, final int threadCount) throws WorkloadException {
int totalThreads = threadCount > 0 ? threadCount : 1;
if (threadID >= totalThreads) {
throw new IllegalStateException("Thread ID " + threadID + " cannot be greater "
+ "than or equal than the thread count " + totalThreads);
}
      if (keys.length < threadCount) {
        throw new WorkloadException("Key count " + keys.length + " must be greater "
            + "than or equal to the thread count " + totalThreads);
}
int keysPerThread = keys.length / totalThreads;
keyIdx = keysPerThread * threadID;
keyIdxStart = keyIdx;
if (totalThreads - 1 == threadID) {
keyIdxEnd = keys.length;
} else {
keyIdxEnd = keyIdxStart + keysPerThread;
}
tagValueIdxs = new int[tagPairs]; // all zeros
final String startingTimestamp =
properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startingTimestamp == null || startingTimestamp.isEmpty()) {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits, maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits);
} else {
try {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp), maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
CoreWorkload.INSERT_START_PROPERTY, nfe);
}
}
// Set the last value properly for the timestamp, otherwise it may start
// one interval ago.
startTimestamp = timestampGenerator.nextValue();
// TODO - pick it
queryOffsetGenerator = new UniformLongGenerator(0, maxOffsets - 2);
}
/**
* Generates the next write value for thread.
* @param map An initialized map to populate with tag keys and values as well
* as the timestamp and actual value.
* @param isInsert Whether or not it's an insert or an update. Updates will pick
* an older timestamp (if random isn't enabled).
* @return The next key to write.
*/
protected String nextDataPoint(final Map<String, ByteIterator> map, final boolean isInsert) {
final Random random = ThreadLocalRandom.current();
      // Guard the bound: Random.nextInt() throws when the bound is zero or negative.
      final int sparseBound = (int) ((double) perKeyCardinality * sparsity);
      int iterations = (sparsity <= 0 || sparseBound < 1) ? 1 : random.nextInt(sparseBound);
if (iterations < 1) {
iterations = 1;
}
while (true) {
iterations--;
if (rollover) {
timestampGenerator.nextValue();
rollover = false;
}
String key = null;
if (iterations <= 0) {
final TreeMap<String, String> validationTags;
if (dataintegrity) {
validationTags = new TreeMap<String, String>();
} else {
validationTags = null;
}
key = keys[keyIdx];
int overallIdx = keyIdx * cumulativeCardinality[0];
for (int i = 0; i < tagPairs; ++i) {
int tvidx = tagValueIdxs[i];
map.put(tagKeys[i], new StringByteIterator(tagValues[tvidx]));
if (dataintegrity) {
validationTags.put(tagKeys[i], tagValues[tvidx]);
}
if (delayedSeries > 0) {
overallIdx += (tvidx * cumulativeCardinality[i + 1]);
}
}
if (!isInsert) {
          final long delta = (timestampGenerator.currentValue() - startTimestamp) / timestampInterval;
          // Guard: at the very first interval delta is 0 and Random.nextInt(0) would throw.
          final int intervals = delta > 0 ? random.nextInt((int) delta) : 0;
map.put(timestampKey, new NumericByteIterator(startTimestamp + (intervals * timestampInterval)));
} else if (delayedSeries > 0) {
// See if the series falls in a delay bucket and calculate an offset earlier
// than the current timestamp value if so.
double pct = (double) overallIdx / (double) totalCardinality;
if (pct < delayedSeries) {
int modulo = overallIdx % delayedIntervals;
if (modulo < 0) {
modulo *= -1;
}
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue() -
timestampInterval * modulo));
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
if (dataintegrity) {
map.put(valueKey, new NumericByteIterator(validationFunction(key,
timestampGenerator.currentValue(), validationTags)));
} else {
switch (valueType) {
case INTEGERS:
map.put(valueKey, new NumericByteIterator(random.nextInt()));
break;
case FLOATS:
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
break;
case MIXED:
if (random.nextBoolean()) {
map.put(valueKey, new NumericByteIterator(random.nextInt()));
} else {
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
}
break;
default:
throw new IllegalStateException("Somehow we didn't have a value "
+ "type configured that we support: " + valueType);
}
}
}
boolean tagRollover = false;
for (int i = tagCardinality.length - 1; i >= 0; --i) {
if (tagCardinality[i] <= 1) {
tagRollover = true; // Only one tag so needs roll over.
continue;
}
if (tagValueIdxs[i] + 1 >= tagCardinality[i]) {
tagValueIdxs[i] = 0;
if (i == firstIncrementableCardinality) {
tagRollover = true;
}
} else {
++tagValueIdxs[i];
break;
}
}
if (tagRollover) {
if (keyIdx + 1 >= keyIdxEnd) {
keyIdx = keyIdxStart;
rollover = true;
} else {
++keyIdx;
}
}
if (iterations <= 0) {
return key;
}
}
}
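    // Sketch of the tag-value "odometer" above (illustrative cardinalities): with
    // tagCardinality = [1, 2], tagValueIdxs steps (0,0) -> (0,1) -> (0,0) with
    // tagRollover set, which advances keyIdx; once the last key in this thread's
    // range wraps, 'rollover' is set and the next call advances the timestamp.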
}
} |
for (final ValueType type : ValueType.values()) {
if (type.name.equalsIgnoreCase(name)) {
return type;
}
}
throw new IllegalArgumentException("Unrecognized type: " + name);
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/TimeSeriesWorkload.java | TimeSeriesWorkload | init | class TimeSeriesWorkload extends Workload {
/**
* The types of values written to the timeseries store.
*/
public enum ValueType {
INTEGERS("integers"),
FLOATS("floats"),
MIXED("mixednumbers");
protected final String name;
ValueType(final String name) {
this.name = name;
}
public static ValueType fromString(final String name) {
for (final ValueType type : ValueType.values()) {
if (type.name.equalsIgnoreCase(name)) {
return type;
}
}
throw new IllegalArgumentException("Unrecognized type: " + name);
}
}
/** Name and default value for the timestamp key property. */
public static final String TIMESTAMP_KEY_PROPERTY = "timestampkey";
public static final String TIMESTAMP_KEY_PROPERTY_DEFAULT = "YCSBTS";
/** Name and default value for the value key property. */
public static final String VALUE_KEY_PROPERTY = "valuekey";
public static final String VALUE_KEY_PROPERTY_DEFAULT = "YCSBV";
/** Name and default value for the timestamp interval property. */
public static final String TIMESTAMP_INTERVAL_PROPERTY = "timestampinterval";
public static final String TIMESTAMP_INTERVAL_PROPERTY_DEFAULT = "60";
/** Name and default value for the timestamp units property. */
public static final String TIMESTAMP_UNITS_PROPERTY = "timestampunits";
public static final String TIMESTAMP_UNITS_PROPERTY_DEFAULT = "SECONDS";
/** Name and default value for the number of tags property. */
public static final String TAG_COUNT_PROPERTY = "tagcount";
public static final String TAG_COUNT_PROPERTY_DEFAULT = "4";
/** Name and default value for the tag value cardinality map property. */
public static final String TAG_CARDINALITY_PROPERTY = "tagcardinality";
public static final String TAG_CARDINALITY_PROPERTY_DEFAULT = "1, 2, 4, 8";
/** Name and default value for the tag key length property. */
public static final String TAG_KEY_LENGTH_PROPERTY = "tagkeylength";
public static final String TAG_KEY_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag value length property. */
public static final String TAG_VALUE_LENGTH_PROPERTY = "tagvaluelength";
public static final String TAG_VALUE_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag pair delimiter property. */
public static final String PAIR_DELIMITER_PROPERTY = "tagpairdelimiter";
public static final String PAIR_DELIMITER_PROPERTY_DEFAULT = "=";
/** Name and default value for the delete string delimiter property. */
public static final String DELETE_DELIMITER_PROPERTY = "deletedelimiter";
public static final String DELETE_DELIMITER_PROPERTY_DEFAULT = ":";
/** Name and default value for the random timestamp write order property. */
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY = "randomwritetimestamporder";
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT = "false";
/** Name and default value for the random time series write order property. */
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY = "randomtimeseriesorder";
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT = "true";
/** Name and default value for the value types property. */
public static final String VALUE_TYPE_PROPERTY = "valuetype";
public static final String VALUE_TYPE_PROPERTY_DEFAULT = "floats";
/** Name and default value for the sparsity property. */
public static final String SPARSITY_PROPERTY = "sparsity";
public static final String SPARSITY_PROPERTY_DEFAULT = "0.00";
/** Name and default value for the delayed series percentage property. */
public static final String DELAYED_SERIES_PROPERTY = "delayedseries";
public static final String DELAYED_SERIES_PROPERTY_DEFAULT = "0.10";
/** Name and default value for the delayed series intervals property. */
public static final String DELAYED_INTERVALS_PROPERTY = "delayedintervals";
public static final String DELAYED_INTERVALS_PROPERTY_DEFAULT = "5";
/** Name and default value for the query time span property. */
public static final String QUERY_TIMESPAN_PROPERTY = "querytimespan";
public static final String QUERY_TIMESPAN_PROPERTY_DEFAULT = "0";
/** Name and default value for the randomized query time span property. */
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY = "queryrandomtimespan";
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT = "false";
/** Name and default value for the query time stamp delimiter property. */
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY = "querytimespandelimiter";
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT = ",";
/** Name and default value for the group-by key property. */
public static final String GROUPBY_KEY_PROPERTY = "groupbykey";
public static final String GROUPBY_KEY_PROPERTY_DEFAULT = "YCSBGB";
/** Name and default value for the group-by function property. */
public static final String GROUPBY_PROPERTY = "groupbyfunction";
/** Name and default value for the group-by key map property. */
public static final String GROUPBY_KEYS_PROPERTY = "groupbykeys";
/** Name and default value for the downsampling key property. */
public static final String DOWNSAMPLING_KEY_PROPERTY = "downsamplingkey";
public static final String DOWNSAMPLING_KEY_PROPERTY_DEFAULT = "YCSBDS";
/** Name and default value for the downsampling function property. */
public static final String DOWNSAMPLING_FUNCTION_PROPERTY = "downsamplingfunction";
/** Name and default value for the downsampling interval property. */
public static final String DOWNSAMPLING_INTERVAL_PROPERTY = "downsamplinginterval";
/** The properties to pull settings from. */
protected Properties properties;
/** Generators for keys, tag keys and tag values. */
protected Generator<String> keyGenerator;
protected Generator<String> tagKeyGenerator;
protected Generator<String> tagValueGenerator;
/** The timestamp key, defaults to "YCSBTS". */
protected String timestampKey;
  /** The value key, defaults to "YCSBV". */
protected String valueKey;
/** The number of time units in between timestamps. */
protected int timestampInterval;
/** The units of time the timestamp and various intervals represent. */
protected TimeUnit timeUnits;
/** Whether or not to randomize the timestamp order when writing. */
protected boolean randomizeTimestampOrder;
/** Whether or not to randomize (shuffle) the time series order. NOT compatible
* with data integrity. */
protected boolean randomizeTimeseriesOrder;
/** The type of values to generate when writing data. */
protected ValueType valueType;
/** Used to calculate an offset for each time series. */
protected int[] cumulativeCardinality;
/** The calculated total cardinality based on the config. */
protected int totalCardinality;
/** The calculated per-time-series-key cardinality. I.e. the number of unique
* tag key and value combinations. */
protected int perKeyCardinality;
/** How much data to scan for in each call. */
protected NumberGenerator scanlength;
/** A generator used to select a random time series key per read/scan. */
protected NumberGenerator keychooser;
/** A generator to select what operation to perform during the run phase. */
protected DiscreteGenerator operationchooser;
/** The maximum number of interval offsets from the starting timestamp. Calculated
* based on the number of records configured for the run. */
protected int maxOffsets;
/** The number of records or operations to perform for this run. */
protected int recordcount;
/** The number of tag pairs per time series. */
protected int tagPairs;
/** The table we'll write to. */
protected String table;
/** How many time series keys will be generated. */
protected int numKeys;
/** The generated list of possible time series key values. */
protected String[] keys;
/** The generated list of possible tag key values. */
protected String[] tagKeys;
/** The generated list of possible tag value values. */
protected String[] tagValues;
/** The cardinality for each tag key. */
protected int[] tagCardinality;
/** A helper to skip non-incrementing tag values. */
protected int firstIncrementableCardinality;
/** How sparse the data written should be. */
protected double sparsity;
/** The percentage of time series that should be delayed in writes. */
protected double delayedSeries;
/** The maximum number of intervals to delay a series. */
protected int delayedIntervals;
/** Optional query time interval during reads/scans. */
protected int queryTimeSpan;
/** Whether or not the actual interval should be randomly chosen, using
* queryTimeSpan as the maximum value. */
protected boolean queryRandomTimeSpan;
/** The delimiter for tag pairs in fields. */
protected String tagPairDelimiter;
/** The delimiter between parameters for the delete key. */
protected String deleteDelimiter;
/** The delimiter between timestamps for query time spans. */
protected String queryTimeSpanDelimiter;
/** Whether or not to issue group-by queries. */
protected boolean groupBy;
/** The key used for group-by tag keys. */
protected String groupByKey;
/** The function used for group-by's. */
protected String groupByFunction;
/** The tag keys to group on. */
protected boolean[] groupBys;
/** Whether or not to issue downsampling queries. */
protected boolean downsample;
/** The key used for downsampling tag keys. */
protected String downsampleKey;
/** The downsampling function. */
protected String downsampleFunction;
/** The downsampling interval. */
protected int downsampleInterval;
/**
* Set to true if want to check correctness of reads. Must also
* be set to true during loading phase to function.
*/
protected boolean dataintegrity;
/** Measurements to write data integrity results to. */
protected Measurements measurements = Measurements.getMeasurements();
@Override
public void init(final Properties p) throws WorkloadException {<FILL_FUNCTION_BODY>}
@Override
public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {
if (properties == null) {
throw new WorkloadException("Workload has not been initialized.");
}
return new ThreadState(mythreadid, threadcount);
}
@Override
public boolean doInsert(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, true);
if (db.insert(table, key, tags) == Status.OK) {
return true;
}
return false;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
switch (operationchooser.nextString()) {
case "READ":
doTransactionRead(db, threadstate);
break;
case "UPDATE":
doTransactionUpdate(db, threadstate);
break;
case "INSERT":
doTransactionInsert(db, threadstate);
break;
case "SCAN":
doTransactionScan(db, threadstate);
break;
case "DELETE":
doTransactionDelete(db, threadstate);
break;
default:
return false;
}
return true;
}
protected void doTransactionRead(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final String keyname = keys[keychooser.nextValue().intValue()];
final Random random = ThreadLocalRandom.current();
int offsets = state.queryOffsetGenerator.nextValue().intValue();
//int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // randomly chosen tags
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + downsampleInterval);
}
final Map<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
final Status status = db.read(table, keyname, fields, cells);
if (dataintegrity && status == Status.OK) {
verifyRow(keyname, cells);
}
}
protected void doTransactionUpdate(final DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, false);
db.update(table, key, tags);
}
protected void doTransactionInsert(final DB db, Object threadstate) {
doInsert(db, threadstate);
}
protected void doTransactionScan(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final String keyname = keys[random.nextInt(keys.length)];
// choose a random scan length
int len = scanlength.nextValue().intValue();
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // randomly chosen tags
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + tagPairDelimiter + downsampleInterval);
}
final Vector<HashMap<String, ByteIterator>> results = new Vector<HashMap<String, ByteIterator>>();
db.scan(table, keyname, len, fields, results);
}
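  // Note the downsampling field format for scans (function name is illustrative):
  // the function and interval are joined with the pair delimiter, e.g.
  // "YCSBDS=sum=60" for downsamplingfunction "sum" and downsamplinginterval 60,
  // whereas reads concatenate them directly ("YCSBDS=sum60").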
protected void doTransactionDelete(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final StringBuilder buf = new StringBuilder().append(keys[random.nextInt(keys.length)]);
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
    // randomly chosen tags
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
buf.append(deleteDelimiter)
.append(tagKeys[i]);
} else {
buf.append(deleteDelimiter).append(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp);
}
db.delete(table, buf.toString());
}
/**
   * Parses the values returned by a read or scan operation and determines whether
   * the integer value matches the hash computed from the original key, tags and timestamp.
* Only works for raw data points, will not work for group-by's or downsampled data.
* @param key The time series key.
* @param cells The cells read by the DB.
* @return {@link Status#OK} if the data matched or {@link Status#UNEXPECTED_STATE} if
* the data did not match.
*/
protected Status verifyRow(final String key, final Map<String, ByteIterator> cells) {
Status verifyStatus = Status.UNEXPECTED_STATE;
long startTime = System.nanoTime();
double value = 0;
long timestamp = 0;
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
for (final Entry<String, ByteIterator> entry : cells.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
timestamp = it.getLong();
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
value = it.isFloatingPoint() ? it.getDouble() : it.getLong();
} else {
validationTags.put(entry.getKey(), entry.getValue().toString());
}
}
if (validationFunction(key, timestamp, validationTags) == value) {
verifyStatus = Status.OK;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
return verifyStatus;
}
/**
* Function used for generating a deterministic hash based on the combination
* of metric, tags and timestamp.
* @param key A non-null string representing the key.
* @param timestamp A timestamp in the proper units for the workload.
* @param tags A non-null map of tag keys and values NOT including the YCSB
* key or timestamp.
* @return A hash value as an 8 byte integer.
*/
protected long validationFunction(final String key, final long timestamp,
final TreeMap<String, String> tags) {
final StringBuilder validationBuffer = new StringBuilder(keys[0].length() +
(tagPairs * tagKeys[0].length()) + (tagPairs * tagCardinality[1]));
for (final Entry<String, String> pair : tags.entrySet()) {
validationBuffer.append(pair.getKey()).append(pair.getValue());
}
return (long) validationBuffer.toString().hashCode() ^ timestamp;
}
/**
* Breaks out the keys, tags and cardinality initialization in another method
* to keep CheckStyle happy.
* @throws WorkloadException If something goes pear shaped.
*/
protected void initKeysAndTags() throws WorkloadException {
final int keyLength = Integer.parseInt(properties.getProperty(
CoreWorkload.FIELD_LENGTH_PROPERTY,
CoreWorkload.FIELD_LENGTH_PROPERTY_DEFAULT));
final int tagKeyLength = Integer.parseInt(properties.getProperty(
TAG_KEY_LENGTH_PROPERTY, TAG_KEY_LENGTH_PROPERTY_DEFAULT));
final int tagValueLength = Integer.parseInt(properties.getProperty(
TAG_VALUE_LENGTH_PROPERTY, TAG_VALUE_LENGTH_PROPERTY_DEFAULT));
keyGenerator = new IncrementingPrintableStringGenerator(keyLength);
tagKeyGenerator = new IncrementingPrintableStringGenerator(tagKeyLength);
tagValueGenerator = new IncrementingPrintableStringGenerator(tagValueLength);
final int threads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1"));
final String tagCardinalityString = properties.getProperty(
TAG_CARDINALITY_PROPERTY,
TAG_CARDINALITY_PROPERTY_DEFAULT);
final String[] tagCardinalityParts = tagCardinalityString.split(",");
int idx = 0;
totalCardinality = numKeys;
perKeyCardinality = 1;
int maxCardinality = 0;
for (final String card : tagCardinalityParts) {
try {
tagCardinality[idx] = Integer.parseInt(card.trim());
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse cardinality: " +
card, nfe);
}
if (tagCardinality[idx] < 1) {
throw new WorkloadException("Cardinality must be greater than zero: " +
tagCardinality[idx]);
}
totalCardinality *= tagCardinality[idx];
perKeyCardinality *= tagCardinality[idx];
if (tagCardinality[idx] > maxCardinality) {
maxCardinality = tagCardinality[idx];
}
++idx;
if (idx >= tagPairs) {
// we have more cardinalities than tag keys so bail at this point.
break;
}
}
if (numKeys < threads) {
throw new WorkloadException("Field count " + numKeys + " (keys for time "
+ "series workloads) must be greater or equal to the number of "
+ "threads " + threads);
}
// fill tags without explicit cardinality with 1
while (idx < tagPairs) {
tagCardinality[idx++] = 1;
}
for (int i = 0; i < tagCardinality.length; ++i) {
if (tagCardinality[i] > 1) {
firstIncrementableCardinality = i;
break;
}
}
keys = new String[numKeys];
tagKeys = new String[tagPairs];
tagValues = new String[maxCardinality];
for (int i = 0; i < numKeys; ++i) {
keys[i] = keyGenerator.nextString();
}
for (int i = 0; i < tagPairs; ++i) {
tagKeys[i] = tagKeyGenerator.nextString();
}
for (int i = 0; i < maxCardinality; i++) {
tagValues[i] = tagValueGenerator.nextString();
}
if (randomizeTimeseriesOrder) {
Utils.shuffleArray(keys);
Utils.shuffleArray(tagValues);
}
maxOffsets = (recordcount / totalCardinality) + 1;
final int[] keyAndTagCardinality = new int[tagPairs + 1];
keyAndTagCardinality[0] = numKeys;
for (int i = 0; i < tagPairs; i++) {
keyAndTagCardinality[i + 1] = tagCardinality[i];
}
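// cumulativeCardinality[i] becomes the product of the cardinalities of all
// components after i, so each (key, tag values) combination maps to a
// unique series ordinal used for delayed-series bucketing.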
cumulativeCardinality = new int[keyAndTagCardinality.length];
for (int i = 0; i < keyAndTagCardinality.length; i++) {
int cumulation = 1;
for (int x = i; x <= keyAndTagCardinality.length - 1; x++) {
cumulation *= keyAndTagCardinality[x];
}
if (i > 0) {
cumulativeCardinality[i - 1] = cumulation;
}
}
cumulativeCardinality[cumulativeCardinality.length - 1] = 1;
}
/**
* Makes sure the settings as given are compatible.
* @throws WorkloadException If one or more settings were invalid.
*/
protected void validateSettings() throws WorkloadException {
if (dataintegrity) {
if (valueType != ValueType.INTEGERS) {
throw new WorkloadException("Data integrity was enabled. 'valuetype' must "
+ "be set to 'integers'.");
}
if (groupBy) {
throw new WorkloadException("Data integrity was enabled. 'groupbyfunction' must "
+ "be empty or null.");
}
if (downsample) {
throw new WorkloadException("Data integrity was enabled. 'downsamplingfunction' must "
+ "be empty or null.");
}
if (queryTimeSpan > 0) {
throw new WorkloadException("Data integrity was enabled. 'querytimespan' must "
+ "be empty or 0.");
}
if (randomizeTimeseriesOrder) {
throw new WorkloadException("Data integrity was enabled. 'randomizetimeseriesorder' must "
+ "be false.");
}
final String startTimestamp = properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startTimestamp == null || startTimestamp.isEmpty()) {
throw new WorkloadException("Data integrity was enabled. 'insertstart' must "
+ "be set to a Unix Epoch timestamp.");
}
}
}
/**
* Thread state class holding thread local generators and indices.
*/
protected class ThreadState {
/** The timestamp generator for this thread. */
protected final UnixEpochTimestampGenerator timestampGenerator;
/** An offset generator to select a random offset for queries. */
protected final NumberGenerator queryOffsetGenerator;
/** The current write key index. */
protected int keyIdx;
/** The starting fence for writing keys. */
protected int keyIdxStart;
/** The ending fence for writing keys. */
protected int keyIdxEnd;
/** Indices for each tag value for writes. */
protected int[] tagValueIdxs;
/** Whether or not all time series have written values for the current timestamp. */
protected boolean rollover;
/** The starting timestamp. */
protected long startTimestamp;
/**
* Default ctor.
* @param threadID The zero based thread ID.
* @param threadCount The total number of threads.
* @throws WorkloadException If something went pear-shaped.
*/
protected ThreadState(final int threadID, final int threadCount) throws WorkloadException {
int totalThreads = threadCount > 0 ? threadCount : 1;
if (threadID >= totalThreads) {
throw new IllegalStateException("Thread ID " + threadID + " cannot be greater "
+ "than or equal than the thread count " + totalThreads);
}
if (keys.length < threadCount) {
throw new WorkloadException("Key count " + keys.length + " must be greater "
+ "than or equal to the thread count " + totalThreads);
}
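// Partition the key space evenly across threads; the last thread also
// picks up any remainder keys.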
int keysPerThread = keys.length / totalThreads;
keyIdx = keysPerThread * threadID;
keyIdxStart = keyIdx;
if (totalThreads - 1 == threadID) {
keyIdxEnd = keys.length;
} else {
keyIdxEnd = keyIdxStart + keysPerThread;
}
tagValueIdxs = new int[tagPairs]; // all zeros
final String startingTimestamp =
properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startingTimestamp == null || startingTimestamp.isEmpty()) {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits, maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits);
} else {
try {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp), maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
CoreWorkload.INSERT_START_PROPERTY, nfe);
}
}
// Set the last value properly for the timestamp, otherwise it may start
// one interval ago.
startTimestamp = timestampGenerator.nextValue();
// TODO - pick it
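// Presumably the upper bound of maxOffsets - 2 keeps a randomly chosen
// start offset strictly inside the written range so at least one later
// interval of data exists for the query.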
queryOffsetGenerator = new UniformLongGenerator(0, maxOffsets - 2);
}
/**
* Generates the next write value for the thread.
* @param map An initialized map to populate with tag keys and values as well
* as the timestamp and actual value.
* @param isInsert True for an insert, false for an update. Updates will pick
* an older timestamp (if random timestamp order isn't enabled).
* @return The next key to write.
*/
protected String nextDataPoint(final Map<String, ByteIterator> map, final boolean isInsert) {
final Random random = ThreadLocalRandom.current();
int iterations = sparsity <= 0 ? 1 : random.nextInt((int) ((double) perKeyCardinality * sparsity));
if (iterations < 1) {
iterations = 1;
}
while (true) {
iterations--;
if (rollover) {
timestampGenerator.nextValue();
rollover = false;
}
String key = null;
if (iterations <= 0) {
final TreeMap<String, String> validationTags;
if (dataintegrity) {
validationTags = new TreeMap<String, String>();
} else {
validationTags = null;
}
key = keys[keyIdx];
int overallIdx = keyIdx * cumulativeCardinality[0];
for (int i = 0; i < tagPairs; ++i) {
int tvidx = tagValueIdxs[i];
map.put(tagKeys[i], new StringByteIterator(tagValues[tvidx]));
if (dataintegrity) {
validationTags.put(tagKeys[i], tagValues[tvidx]);
}
if (delayedSeries > 0) {
overallIdx += (tvidx * cumulativeCardinality[i + 1]);
}
}
if (!isInsert) {
final long delta = (timestampGenerator.currentValue() - startTimestamp) / timestampInterval;
// Guard against Random#nextInt(0), which throws if no intervals have elapsed yet.
final int intervals = delta <= 0 ? 0 : random.nextInt((int) delta);
map.put(timestampKey, new NumericByteIterator(startTimestamp + (intervals * timestampInterval)));
} else if (delayedSeries > 0) {
// See if the series falls in a delay bucket and calculate an offset earlier
// than the current timestamp value if so.
double pct = (double) overallIdx / (double) totalCardinality;
if (pct < delayedSeries) {
int modulo = overallIdx % delayedIntervals;
if (modulo < 0) {
modulo *= -1;
}
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue() -
timestampInterval * modulo));
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
if (dataintegrity) {
map.put(valueKey, new NumericByteIterator(validationFunction(key,
timestampGenerator.currentValue(), validationTags)));
} else {
switch (valueType) {
case INTEGERS:
map.put(valueKey, new NumericByteIterator(random.nextInt()));
break;
case FLOATS:
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
break;
case MIXED:
if (random.nextBoolean()) {
map.put(valueKey, new NumericByteIterator(random.nextInt()));
} else {
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
}
break;
default:
throw new IllegalStateException("Somehow we didn't have a value "
+ "type configured that we support: " + valueType);
}
}
}
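// Advance the tag value indices like an odometer, starting from the last
// (least significant) tag; once the lowest incrementable tag wraps, move
// on to the next time series key.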
boolean tagRollover = false;
for (int i = tagCardinality.length - 1; i >= 0; --i) {
if (tagCardinality[i] <= 1) {
tagRollover = true; // A cardinality of one never increments, so treat it as rolled over.
continue;
}
if (tagValueIdxs[i] + 1 >= tagCardinality[i]) {
tagValueIdxs[i] = 0;
if (i == firstIncrementableCardinality) {
tagRollover = true;
}
} else {
++tagValueIdxs[i];
break;
}
}
if (tagRollover) {
if (keyIdx + 1 >= keyIdxEnd) {
keyIdx = keyIdxStart;
rollover = true;
} else {
++keyIdx;
}
}
if (iterations <= 0) {
return key;
}
}
}
}
} |
properties = p;
recordcount =
Integer.parseInt(p.getProperty(Client.RECORD_COUNT_PROPERTY,
Client.DEFAULT_RECORD_COUNT));
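// A record count of zero is treated as effectively unbounded.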
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
timestampKey = p.getProperty(TIMESTAMP_KEY_PROPERTY, TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = p.getProperty(VALUE_KEY_PROPERTY, VALUE_KEY_PROPERTY_DEFAULT);
operationchooser = CoreWorkload.createOperationGenerator(properties);
final int maxscanlength =
Integer.parseInt(p.getProperty(CoreWorkload.MAX_SCAN_LENGTH_PROPERTY,
CoreWorkload.MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY,
CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(1, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(1, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
randomizeTimestampOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY,
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT));
randomizeTimeseriesOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESERIES_ORDER_PROPERTY,
RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT));
// setup the cardinality
numKeys = Integer.parseInt(p.getProperty(CoreWorkload.FIELD_COUNT_PROPERTY,
CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT));
tagPairs = Integer.parseInt(p.getProperty(TAG_COUNT_PROPERTY,
TAG_COUNT_PROPERTY_DEFAULT));
sparsity = Double.parseDouble(p.getProperty(SPARSITY_PROPERTY, SPARSITY_PROPERTY_DEFAULT));
tagCardinality = new int[tagPairs];
final String requestdistrib =
p.getProperty(CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY,
CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
keychooser = new ScrambledZipfianGenerator(0, numKeys - 1);
//} else if (requestdistrib.compareTo("latest") == 0) {
// keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_DATA_FRACTION,
CoreWorkload.HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_OPN_FRACTION,
CoreWorkload.HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, numKeys - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
// figure out the start timestamp based on the units, cardinality and interval
try {
timestampInterval = Integer.parseInt(p.getProperty(
TIMESTAMP_INTERVAL_PROPERTY, TIMESTAMP_INTERVAL_PROPERTY_DEFAULT));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
TIMESTAMP_INTERVAL_PROPERTY, nfe);
}
try {
timeUnits = TimeUnit.valueOf(p.getProperty(TIMESTAMP_UNITS_PROPERTY,
TIMESTAMP_UNITS_PROPERTY_DEFAULT).toUpperCase());
} catch (IllegalArgumentException e) {
throw new WorkloadException("Unknown time unit type", e);
}
if (timeUnits == TimeUnit.NANOSECONDS || timeUnits == TimeUnit.MICROSECONDS) {
throw new WorkloadException("YCSB doesn't support " + timeUnits +
" at this time.");
}
tagPairDelimiter = p.getProperty(PAIR_DELIMITER_PROPERTY, PAIR_DELIMITER_PROPERTY_DEFAULT);
deleteDelimiter = p.getProperty(DELETE_DELIMITER_PROPERTY, DELETE_DELIMITER_PROPERTY_DEFAULT);
dataintegrity = Boolean.parseBoolean(
p.getProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY,
CoreWorkload.DATA_INTEGRITY_PROPERTY_DEFAULT));
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
queryTimeSpan = Integer.parseInt(p.getProperty(QUERY_TIMESPAN_PROPERTY,
QUERY_TIMESPAN_PROPERTY_DEFAULT));
queryRandomTimeSpan = Boolean.parseBoolean(p.getProperty(QUERY_RANDOM_TIMESPAN_PROPERTY,
QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT));
queryTimeSpanDelimiter = p.getProperty(QUERY_TIMESPAN_DELIMITER_PROPERTY,
QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
groupByKey = p.getProperty(GROUPBY_KEY_PROPERTY, GROUPBY_KEY_PROPERTY_DEFAULT);
groupByFunction = p.getProperty(GROUPBY_PROPERTY);
if (groupByFunction != null && !groupByFunction.isEmpty()) {
final String groupByKeys = p.getProperty(GROUPBY_KEYS_PROPERTY);
if (groupByKeys == null || groupByKeys.isEmpty()) {
throw new WorkloadException("Group by was enabled but no keys were specified.");
}
final String[] gbKeys = groupByKeys.split(",");
// Compare against tagPairs; tagKeys is not populated until initKeysAndTags() runs below.
if (gbKeys.length != tagPairs) {
throw new WorkloadException("Only " + gbKeys.length + " group by keys "
+ "were specified but there were " + tagPairs + " tag keys given.");
}
}
groupBys = new boolean[gbKeys.length];
for (int i = 0; i < gbKeys.length; i++) {
groupBys[i] = Integer.parseInt(gbKeys[i].trim()) != 0;
}
groupBy = true;
}
downsampleKey = p.getProperty(DOWNSAMPLING_KEY_PROPERTY, DOWNSAMPLING_KEY_PROPERTY_DEFAULT);
downsampleFunction = p.getProperty(DOWNSAMPLING_FUNCTION_PROPERTY);
if (downsampleFunction != null && !downsampleFunction.isEmpty()) {
final String interval = p.getProperty(DOWNSAMPLING_INTERVAL_PROPERTY);
if (interval == null || interval.isEmpty()) {
throw new WorkloadException("'" + DOWNSAMPLING_INTERVAL_PROPERTY + "' was missing despite '"
+ DOWNSAMPLING_FUNCTION_PROPERTY + "' being set.");
}
downsampleInterval = Integer.parseInt(interval);
downsample = true;
}
delayedSeries = Double.parseDouble(p.getProperty(DELAYED_SERIES_PROPERTY, DELAYED_SERIES_PROPERTY_DEFAULT));
delayedIntervals = Integer.parseInt(p.getProperty(DELAYED_INTERVALS_PROPERTY, DELAYED_INTERVALS_PROPERTY_DEFAULT));
valueType = ValueType.fromString(p.getProperty(VALUE_TYPE_PROPERTY, VALUE_TYPE_PROPERTY_DEFAULT));
table = p.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
initKeysAndTags();
validateSettings();
|
brianfrankcooper_YCSB | YCSB/core/src/main/java/site/ycsb/workloads/TimeSeriesWorkload.java | TimeSeriesWorkload | initThread | class TimeSeriesWorkload extends Workload {
/**
* The types of values written to the timeseries store.
*/
public enum ValueType {
INTEGERS("integers"),
FLOATS("floats"),
MIXED("mixednumbers");
protected final String name;
ValueType(final String name) {
this.name = name;
}
public static ValueType fromString(final String name) {
for (final ValueType type : ValueType.values()) {
if (type.name.equalsIgnoreCase(name)) {
return type;
}
}
throw new IllegalArgumentException("Unrecognized type: " + name);
}
}
/** Name and default value for the timestamp key property. */
public static final String TIMESTAMP_KEY_PROPERTY = "timestampkey";
public static final String TIMESTAMP_KEY_PROPERTY_DEFAULT = "YCSBTS";
/** Name and default value for the value key property. */
public static final String VALUE_KEY_PROPERTY = "valuekey";
public static final String VALUE_KEY_PROPERTY_DEFAULT = "YCSBV";
/** Name and default value for the timestamp interval property. */
public static final String TIMESTAMP_INTERVAL_PROPERTY = "timestampinterval";
public static final String TIMESTAMP_INTERVAL_PROPERTY_DEFAULT = "60";
/** Name and default value for the timestamp units property. */
public static final String TIMESTAMP_UNITS_PROPERTY = "timestampunits";
public static final String TIMESTAMP_UNITS_PROPERTY_DEFAULT = "SECONDS";
/** Name and default value for the number of tags property. */
public static final String TAG_COUNT_PROPERTY = "tagcount";
public static final String TAG_COUNT_PROPERTY_DEFAULT = "4";
/** Name and default value for the tag value cardinality map property. */
public static final String TAG_CARDINALITY_PROPERTY = "tagcardinality";
public static final String TAG_CARDINALITY_PROPERTY_DEFAULT = "1, 2, 4, 8";
/** Name and default value for the tag key length property. */
public static final String TAG_KEY_LENGTH_PROPERTY = "tagkeylength";
public static final String TAG_KEY_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag value length property. */
public static final String TAG_VALUE_LENGTH_PROPERTY = "tagvaluelength";
public static final String TAG_VALUE_LENGTH_PROPERTY_DEFAULT = "8";
/** Name and default value for the tag pair delimiter property. */
public static final String PAIR_DELIMITER_PROPERTY = "tagpairdelimiter";
public static final String PAIR_DELIMITER_PROPERTY_DEFAULT = "=";
/** Name and default value for the delete string delimiter property. */
public static final String DELETE_DELIMITER_PROPERTY = "deletedelimiter";
public static final String DELETE_DELIMITER_PROPERTY_DEFAULT = ":";
/** Name and default value for the random timestamp write order property. */
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY = "randomwritetimestamporder";
public static final String RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT = "false";
/** Name and default value for the random time series write order property. */
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY = "randomtimeseriesorder";
public static final String RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT = "true";
/** Name and default value for the value types property. */
public static final String VALUE_TYPE_PROPERTY = "valuetype";
public static final String VALUE_TYPE_PROPERTY_DEFAULT = "floats";
/** Name and default value for the sparsity property. */
public static final String SPARSITY_PROPERTY = "sparsity";
public static final String SPARSITY_PROPERTY_DEFAULT = "0.00";
/** Name and default value for the delayed series percentage property. */
public static final String DELAYED_SERIES_PROPERTY = "delayedseries";
public static final String DELAYED_SERIES_PROPERTY_DEFAULT = "0.10";
/** Name and default value for the delayed series intervals property. */
public static final String DELAYED_INTERVALS_PROPERTY = "delayedintervals";
public static final String DELAYED_INTERVALS_PROPERTY_DEFAULT = "5";
/** Name and default value for the query time span property. */
public static final String QUERY_TIMESPAN_PROPERTY = "querytimespan";
public static final String QUERY_TIMESPAN_PROPERTY_DEFAULT = "0";
/** Name and default value for the randomized query time span property. */
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY = "queryrandomtimespan";
public static final String QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT = "false";
/** Name and default value for the query time stamp delimiter property. */
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY = "querytimespandelimiter";
public static final String QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT = ",";
/** Name and default value for the group-by key property. */
public static final String GROUPBY_KEY_PROPERTY = "groupbykey";
public static final String GROUPBY_KEY_PROPERTY_DEFAULT = "YCSBGB";
/** Name and default value for the group-by function property. */
public static final String GROUPBY_PROPERTY = "groupbyfunction";
/** Name and default value for the group-by key map property. */
public static final String GROUPBY_KEYS_PROPERTY = "groupbykeys";
/** Name and default value for the downsampling key property. */
public static final String DOWNSAMPLING_KEY_PROPERTY = "downsamplingkey";
public static final String DOWNSAMPLING_KEY_PROPERTY_DEFAULT = "YCSBDS";
/** Name and default value for the downsampling function property. */
public static final String DOWNSAMPLING_FUNCTION_PROPERTY = "downsamplingfunction";
/** Name and default value for the downsampling interval property. */
public static final String DOWNSAMPLING_INTERVAL_PROPERTY = "downsamplinginterval";
/** The properties to pull settings from. */
protected Properties properties;
/** Generators for keys, tag keys and tag values. */
protected Generator<String> keyGenerator;
protected Generator<String> tagKeyGenerator;
protected Generator<String> tagValueGenerator;
/** The timestamp key, defaults to "YCSBTS". */
protected String timestampKey;
/** The value key, defaults to "YCSBDS". */
protected String valueKey;
/** The number of time units in between timestamps. */
protected int timestampInterval;
/** The units of time the timestamp and various intervals represent. */
protected TimeUnit timeUnits;
/** Whether or not to randomize the timestamp order when writing. */
protected boolean randomizeTimestampOrder;
/** Whether or not to randomize (shuffle) the time series order. NOT compatible
* with data integrity. */
protected boolean randomizeTimeseriesOrder;
/** The type of values to generate when writing data. */
protected ValueType valueType;
/** Used to calculate an offset for each time series. */
protected int[] cumulativeCardinality;
/** The calculated total cardinality based on the config. */
protected int totalCardinality;
/** The calculated per-time-series-key cardinality. I.e. the number of unique
* tag key and value combinations. */
protected int perKeyCardinality;
/** How much data to scan for in each call. */
protected NumberGenerator scanlength;
/** A generator used to select a random time series key per read/scan. */
protected NumberGenerator keychooser;
/** A generator to select what operation to perform during the run phase. */
protected DiscreteGenerator operationchooser;
/** The maximum number of interval offsets from the starting timestamp. Calculated
* based on the number of records configured for the run. */
protected int maxOffsets;
/** The number of records or operations to perform for this run. */
protected int recordcount;
/** The number of tag pairs per time series. */
protected int tagPairs;
/** The table we'll write to. */
protected String table;
/** How many time series keys will be generated. */
protected int numKeys;
/** The generated list of possible time series key values. */
protected String[] keys;
/** The generated list of possible tag key values. */
protected String[] tagKeys;
/** The generated list of possible tag value values. */
protected String[] tagValues;
/** The cardinality for each tag key. */
protected int[] tagCardinality;
/** A helper to skip non-incrementing tag values. */
protected int firstIncrementableCardinality;
/** How sparse the data written should be. */
protected double sparsity;
/** The percentage of time series that should be delayed in writes. */
protected double delayedSeries;
/** The maximum number of intervals to delay a series. */
protected int delayedIntervals;
/** Optional query time interval during reads/scans. */
protected int queryTimeSpan;
/** Whether or not the actual interval should be randomly chosen, using
* queryTimeSpan as the maximum value. */
protected boolean queryRandomTimeSpan;
/** The delimiter for tag pairs in fields. */
protected String tagPairDelimiter;
/** The delimiter between parameters for the delete key. */
protected String deleteDelimiter;
/** The delimiter between timestamps for query time spans. */
protected String queryTimeSpanDelimiter;
/** Whether or not to issue group-by queries. */
protected boolean groupBy;
/** The key used for group-by tag keys. */
protected String groupByKey;
/** The function used for group-by's. */
protected String groupByFunction;
/** The tag keys to group on. */
protected boolean[] groupBys;
/** Whether or not to issue downsampling queries. */
protected boolean downsample;
/** The key used for downsampling tag keys. */
protected String downsampleKey;
/** The downsampling function. */
protected String downsampleFunction;
/** The downsampling interval. */
protected int downsampleInterval;
/**
* Set to true to check the correctness of reads. Must also
* be set to true during the loading phase to function.
*/
protected boolean dataintegrity;
/** Measurements to write data integrity results to. */
protected Measurements measurements = Measurements.getMeasurements();
@Override
public void init(final Properties p) throws WorkloadException {
properties = p;
recordcount =
Integer.parseInt(p.getProperty(Client.RECORD_COUNT_PROPERTY,
Client.DEFAULT_RECORD_COUNT));
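// A record count of zero is treated as effectively unbounded.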
if (recordcount == 0) {
recordcount = Integer.MAX_VALUE;
}
timestampKey = p.getProperty(TIMESTAMP_KEY_PROPERTY, TIMESTAMP_KEY_PROPERTY_DEFAULT);
valueKey = p.getProperty(VALUE_KEY_PROPERTY, VALUE_KEY_PROPERTY_DEFAULT);
operationchooser = CoreWorkload.createOperationGenerator(properties);
final int maxscanlength =
Integer.parseInt(p.getProperty(CoreWorkload.MAX_SCAN_LENGTH_PROPERTY,
CoreWorkload.MAX_SCAN_LENGTH_PROPERTY_DEFAULT));
String scanlengthdistrib =
p.getProperty(CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY,
CoreWorkload.SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT);
if (scanlengthdistrib.compareTo("uniform") == 0) {
scanlength = new UniformLongGenerator(1, maxscanlength);
} else if (scanlengthdistrib.compareTo("zipfian") == 0) {
scanlength = new ZipfianGenerator(1, maxscanlength);
} else {
throw new WorkloadException(
"Distribution \"" + scanlengthdistrib + "\" not allowed for scan length");
}
randomizeTimestampOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY,
RANDOMIZE_TIMESTAMP_ORDER_PROPERTY_DEFAULT));
randomizeTimeseriesOrder = Boolean.parseBoolean(p.getProperty(
RANDOMIZE_TIMESERIES_ORDER_PROPERTY,
RANDOMIZE_TIMESERIES_ORDER_PROPERTY_DEFAULT));
// setup the cardinality
numKeys = Integer.parseInt(p.getProperty(CoreWorkload.FIELD_COUNT_PROPERTY,
CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT));
tagPairs = Integer.parseInt(p.getProperty(TAG_COUNT_PROPERTY,
TAG_COUNT_PROPERTY_DEFAULT));
sparsity = Double.parseDouble(p.getProperty(SPARSITY_PROPERTY, SPARSITY_PROPERTY_DEFAULT));
tagCardinality = new int[tagPairs];
final String requestdistrib =
p.getProperty(CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY,
CoreWorkload.REQUEST_DISTRIBUTION_PROPERTY_DEFAULT);
if (requestdistrib.compareTo("uniform") == 0) {
keychooser = new UniformLongGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("sequential") == 0) {
keychooser = new SequentialGenerator(0, numKeys - 1);
} else if (requestdistrib.compareTo("zipfian") == 0) {
keychooser = new ScrambledZipfianGenerator(0, numKeys - 1);
//} else if (requestdistrib.compareTo("latest") == 0) {
// keychooser = new SkewedLatestGenerator(transactioninsertkeysequence);
} else if (requestdistrib.equals("hotspot")) {
double hotsetfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_DATA_FRACTION,
CoreWorkload.HOTSPOT_DATA_FRACTION_DEFAULT));
double hotopnfraction =
Double.parseDouble(p.getProperty(CoreWorkload.HOTSPOT_OPN_FRACTION,
CoreWorkload.HOTSPOT_OPN_FRACTION_DEFAULT));
keychooser = new HotspotIntegerGenerator(0, numKeys - 1,
hotsetfraction, hotopnfraction);
} else {
throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\"");
}
// figure out the start timestamp based on the units, cardinality and interval
try {
timestampInterval = Integer.parseInt(p.getProperty(
TIMESTAMP_INTERVAL_PROPERTY, TIMESTAMP_INTERVAL_PROPERTY_DEFAULT));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
TIMESTAMP_INTERVAL_PROPERTY, nfe);
}
try {
timeUnits = TimeUnit.valueOf(p.getProperty(TIMESTAMP_UNITS_PROPERTY,
TIMESTAMP_UNITS_PROPERTY_DEFAULT).toUpperCase());
} catch (IllegalArgumentException e) {
throw new WorkloadException("Unknown time unit type", e);
}
if (timeUnits == TimeUnit.NANOSECONDS || timeUnits == TimeUnit.MICROSECONDS) {
throw new WorkloadException("YCSB doesn't support " + timeUnits +
" at this time.");
}
tagPairDelimiter = p.getProperty(PAIR_DELIMITER_PROPERTY, PAIR_DELIMITER_PROPERTY_DEFAULT);
deleteDelimiter = p.getProperty(DELETE_DELIMITER_PROPERTY, DELETE_DELIMITER_PROPERTY_DEFAULT);
dataintegrity = Boolean.parseBoolean(
p.getProperty(CoreWorkload.DATA_INTEGRITY_PROPERTY,
CoreWorkload.DATA_INTEGRITY_PROPERTY_DEFAULT));
if (dataintegrity) {
System.out.println("Data integrity is enabled.");
}
queryTimeSpan = Integer.parseInt(p.getProperty(QUERY_TIMESPAN_PROPERTY,
QUERY_TIMESPAN_PROPERTY_DEFAULT));
queryRandomTimeSpan = Boolean.parseBoolean(p.getProperty(QUERY_RANDOM_TIMESPAN_PROPERTY,
QUERY_RANDOM_TIMESPAN_PROPERTY_DEFAULT));
queryTimeSpanDelimiter = p.getProperty(QUERY_TIMESPAN_DELIMITER_PROPERTY,
QUERY_TIMESPAN_DELIMITER_PROPERTY_DEFAULT);
groupByKey = p.getProperty(GROUPBY_KEY_PROPERTY, GROUPBY_KEY_PROPERTY_DEFAULT);
groupByFunction = p.getProperty(GROUPBY_PROPERTY);
if (groupByFunction != null && !groupByFunction.isEmpty()) {
final String groupByKeys = p.getProperty(GROUPBY_KEYS_PROPERTY);
if (groupByKeys == null || groupByKeys.isEmpty()) {
throw new WorkloadException("Group by was enabled but no keys were specified.");
}
final String[] gbKeys = groupByKeys.split(",");
// Compare against tagPairs; tagKeys is not populated until initKeysAndTags() runs below.
if (gbKeys.length != tagPairs) {
throw new WorkloadException("Only " + gbKeys.length + " group by keys "
+ "were specified but there were " + tagPairs + " tag keys given.");
}
}
groupBys = new boolean[gbKeys.length];
for (int i = 0; i < gbKeys.length; i++) {
groupBys[i] = Integer.parseInt(gbKeys[i].trim()) != 0;
}
groupBy = true;
}
downsampleKey = p.getProperty(DOWNSAMPLING_KEY_PROPERTY, DOWNSAMPLING_KEY_PROPERTY_DEFAULT);
downsampleFunction = p.getProperty(DOWNSAMPLING_FUNCTION_PROPERTY);
if (downsampleFunction != null && !downsampleFunction.isEmpty()) {
final String interval = p.getProperty(DOWNSAMPLING_INTERVAL_PROPERTY);
if (interval == null || interval.isEmpty()) {
throw new WorkloadException("'" + DOWNSAMPLING_INTERVAL_PROPERTY + "' was missing despite '"
+ DOWNSAMPLING_FUNCTION_PROPERTY + "' being set.");
}
downsampleInterval = Integer.parseInt(interval);
downsample = true;
}
delayedSeries = Double.parseDouble(p.getProperty(DELAYED_SERIES_PROPERTY, DELAYED_SERIES_PROPERTY_DEFAULT));
delayedIntervals = Integer.parseInt(p.getProperty(DELAYED_INTERVALS_PROPERTY, DELAYED_INTERVALS_PROPERTY_DEFAULT));
valueType = ValueType.fromString(p.getProperty(VALUE_TYPE_PROPERTY, VALUE_TYPE_PROPERTY_DEFAULT));
table = p.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
initKeysAndTags();
validateSettings();
}
@Override
public Object initThread(Properties p, int mythreadid, int threadcount) throws WorkloadException {<FILL_FUNCTION_BODY>}
@Override
public boolean doInsert(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, true);
return db.insert(table, key, tags) == Status.OK;
}
@Override
public boolean doTransaction(DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
switch (operationchooser.nextString()) {
case "READ":
doTransactionRead(db, threadstate);
break;
case "UPDATE":
doTransactionUpdate(db, threadstate);
break;
case "INSERT":
doTransactionInsert(db, threadstate);
break;
case "SCAN":
doTransactionScan(db, threadstate);
break;
case "DELETE":
doTransactionDelete(db, threadstate);
break;
default:
return false;
}
return true;
}
protected void doTransactionRead(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final String keyname = keys[keychooser.nextValue().intValue()];
final Random random = ThreadLocalRandom.current();
int offsets = state.queryOffsetGenerator.nextValue().intValue();
//int offsets = random.nextInt(maxOffsets - 1);
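// Anchor the read at a random interval offset from the thread's start timestamp.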
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
// randomly select a value for each tag (or just the tag key when grouping on it)
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + downsampleInterval);
}
final Map<String, ByteIterator> cells = new HashMap<String, ByteIterator>();
final Status status = db.read(table, keyname, fields, cells);
if (dataintegrity && status == Status.OK) {
verifyRow(keyname, cells);
}
}
protected void doTransactionUpdate(final DB db, Object threadstate) {
if (threadstate == null) {
throw new IllegalStateException("Missing thread state.");
}
final Map<String, ByteIterator> tags = new TreeMap<String, ByteIterator>();
final String key = ((ThreadState)threadstate).nextDataPoint(tags, false);
db.update(table, key, tags);
}
protected void doTransactionInsert(final DB db, Object threadstate) {
doInsert(db, threadstate);
}
protected void doTransactionScan(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final String keyname = keys[random.nextInt(keys.length)];
// choose a random scan length
int len = scanlength.nextValue().intValue();
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
// randomly select a value for each tag (or just the tag key when grouping on it)
Set<String> fields = new HashSet<String>();
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
fields.add(tagKeys[i]);
} else {
fields.add(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
fields.add(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
fields.add(timestampKey + tagPairDelimiter + startTimestamp);
}
if (groupBy) {
fields.add(groupByKey + tagPairDelimiter + groupByFunction);
}
if (downsample) {
fields.add(downsampleKey + tagPairDelimiter + downsampleFunction + tagPairDelimiter + downsampleInterval);
}
final Vector<HashMap<String, ByteIterator>> results = new Vector<HashMap<String, ByteIterator>>();
db.scan(table, keyname, len, fields, results);
}
protected void doTransactionDelete(final DB db, Object threadstate) {
final ThreadState state = (ThreadState) threadstate;
final Random random = ThreadLocalRandom.current();
final StringBuilder buf = new StringBuilder().append(keys[random.nextInt(keys.length)]);
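// The delete "key" is an encoded string: the series key, then
// deleteDelimiter-separated tag pairs, then a timestamp (or range) filter.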
int offsets = random.nextInt(maxOffsets - 1);
final long startTimestamp;
if (offsets > 0) {
startTimestamp = state.startTimestamp + state.timestampGenerator.getOffset(offsets);
} else {
startTimestamp = state.startTimestamp;
}
// randomly select a value for each tag (or just the tag key when grouping on it)
for (int i = 0; i < tagPairs; ++i) {
if (groupBy && groupBys[i]) {
buf.append(deleteDelimiter)
.append(tagKeys[i]);
} else {
buf.append(deleteDelimiter).append(tagKeys[i] + tagPairDelimiter +
tagValues[random.nextInt(tagCardinality[i])]);
}
}
if (queryTimeSpan > 0) {
final long endTimestamp;
if (queryRandomTimeSpan) {
endTimestamp = startTimestamp + (timestampInterval * random.nextInt(queryTimeSpan / timestampInterval));
} else {
endTimestamp = startTimestamp + queryTimeSpan;
}
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp + queryTimeSpanDelimiter + endTimestamp);
} else {
buf.append(deleteDelimiter)
.append(timestampKey + tagPairDelimiter + startTimestamp);
}
db.delete(table, buf.toString());
}
/**
* Parses the values returned by a read or scan operation and determines whether
* or not the integer value matches the hash and timestamp of the original timestamp.
* Only works for raw data points, will not work for group-by's or downsampled data.
* @param key The time series key.
* @param cells The cells read by the DB.
* @return {@link Status#OK} if the data matched or {@link Status#UNEXPECTED_STATE} if
* the data did not match.
*/
protected Status verifyRow(final String key, final Map<String, ByteIterator> cells) {
Status verifyStatus = Status.UNEXPECTED_STATE;
long startTime = System.nanoTime();
double value = 0;
long timestamp = 0;
final TreeMap<String, String> validationTags = new TreeMap<String, String>();
for (final Entry<String, ByteIterator> entry : cells.entrySet()) {
if (entry.getKey().equals(timestampKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
timestamp = it.getLong();
} else if (entry.getKey().equals(valueKey)) {
final NumericByteIterator it = (NumericByteIterator) entry.getValue();
value = it.isFloatingPoint() ? it.getDouble() : it.getLong();
} else {
validationTags.put(entry.getKey(), entry.getValue().toString());
}
}
if (validationFunction(key, timestamp, validationTags) == value) {
verifyStatus = Status.OK;
}
long endTime = System.nanoTime();
measurements.measure("VERIFY", (int) (endTime - startTime) / 1000);
measurements.reportStatus("VERIFY", verifyStatus);
return verifyStatus;
}
/**
* Function used for generating a deterministic hash based on the combination
* of metric, tags and timestamp.
* @param key A non-null string representing the key.
* @param timestamp A timestamp in the proper units for the workload.
* @param tags A non-null map of tag keys and values NOT including the YCSB
* key or timestamp.
* @return A hash value as an 8 byte integer.
*/
protected long validationFunction(final String key, final long timestamp,
final TreeMap<String, String> tags) {
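// Note: only the sorted tag pairs and the timestamp feed the hash; the key
// parameter is currently unused, so the TreeMap ordering alone keeps the
// result deterministic across load and verify.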
final StringBuilder validationBuffer = new StringBuilder(keys[0].length() +
(tagPairs * tagKeys[0].length()) + (tagPairs * tagCardinality[1]));
for (final Entry<String, String> pair : tags.entrySet()) {
validationBuffer.append(pair.getKey()).append(pair.getValue());
}
return (long) validationBuffer.toString().hashCode() ^ timestamp;
}
/**
* Breaks out the keys, tags and cardinality initialization in another method
* to keep CheckStyle happy.
* @throws WorkloadException If something goes pear-shaped.
*/
protected void initKeysAndTags() throws WorkloadException {
final int keyLength = Integer.parseInt(properties.getProperty(
CoreWorkload.FIELD_LENGTH_PROPERTY,
CoreWorkload.FIELD_LENGTH_PROPERTY_DEFAULT));
final int tagKeyLength = Integer.parseInt(properties.getProperty(
TAG_KEY_LENGTH_PROPERTY, TAG_KEY_LENGTH_PROPERTY_DEFAULT));
final int tagValueLength = Integer.parseInt(properties.getProperty(
TAG_VALUE_LENGTH_PROPERTY, TAG_VALUE_LENGTH_PROPERTY_DEFAULT));
keyGenerator = new IncrementingPrintableStringGenerator(keyLength);
tagKeyGenerator = new IncrementingPrintableStringGenerator(tagKeyLength);
tagValueGenerator = new IncrementingPrintableStringGenerator(tagValueLength);
final int threads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1"));
final String tagCardinalityString = properties.getProperty(
TAG_CARDINALITY_PROPERTY,
TAG_CARDINALITY_PROPERTY_DEFAULT);
final String[] tagCardinalityParts = tagCardinalityString.split(",");
int idx = 0;
totalCardinality = numKeys;
perKeyCardinality = 1;
int maxCardinality = 0;
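// Walk the configured per-tag cardinalities, accumulating the total number
// of unique time series (keys * tag cardinalities) and the per-key count.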
for (final String card : tagCardinalityParts) {
try {
tagCardinality[idx] = Integer.parseInt(card.trim());
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse cardinality: " +
card, nfe);
}
if (tagCardinality[idx] < 1) {
throw new WorkloadException("Cardinality must be greater than zero: " +
tagCardinality[idx]);
}
totalCardinality *= tagCardinality[idx];
perKeyCardinality *= tagCardinality[idx];
if (tagCardinality[idx] > maxCardinality) {
maxCardinality = tagCardinality[idx];
}
++idx;
if (idx >= tagPairs) {
// we have more cardinalities than tag keys so bail at this point.
break;
}
}
if (numKeys < threads) {
throw new WorkloadException("Field count " + numKeys + " (keys for time "
+ "series workloads) must be greater or equal to the number of "
+ "threads " + threads);
}
// fill tags without explicit cardinality with 1
while (idx < tagPairs) {
tagCardinality[idx++] = 1;
}
for (int i = 0; i < tagCardinality.length; ++i) {
if (tagCardinality[i] > 1) {
firstIncrementableCardinality = i;
break;
}
}
keys = new String[numKeys];
tagKeys = new String[tagPairs];
tagValues = new String[maxCardinality];
for (int i = 0; i < numKeys; ++i) {
keys[i] = keyGenerator.nextString();
}
for (int i = 0; i < tagPairs; ++i) {
tagKeys[i] = tagKeyGenerator.nextString();
}
for (int i = 0; i < maxCardinality; i++) {
tagValues[i] = tagValueGenerator.nextString();
}
if (randomizeTimeseriesOrder) {
Utils.shuffleArray(keys);
Utils.shuffleArray(tagValues);
}
maxOffsets = (recordcount / totalCardinality) + 1;
final int[] keyAndTagCardinality = new int[tagPairs + 1];
keyAndTagCardinality[0] = numKeys;
for (int i = 0; i < tagPairs; i++) {
keyAndTagCardinality[i + 1] = tagCardinality[i];
}
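// cumulativeCardinality[i] becomes the product of the cardinalities of all
// components after i, so each (key, tag values) combination maps to a
// unique series ordinal used for delayed-series bucketing.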
cumulativeCardinality = new int[keyAndTagCardinality.length];
for (int i = 0; i < keyAndTagCardinality.length; i++) {
int cumulation = 1;
for (int x = i; x <= keyAndTagCardinality.length - 1; x++) {
cumulation *= keyAndTagCardinality[x];
}
if (i > 0) {
cumulativeCardinality[i - 1] = cumulation;
}
}
cumulativeCardinality[cumulativeCardinality.length - 1] = 1;
}
/**
* Makes sure the settings as given are compatible.
* @throws WorkloadException If one or more settings were invalid.
*/
protected void validateSettings() throws WorkloadException {
if (dataintegrity) {
if (valueType != ValueType.INTEGERS) {
throw new WorkloadException("Data integrity was enabled. 'valuetype' must "
+ "be set to 'integers'.");
}
if (groupBy) {
throw new WorkloadException("Data integrity was enabled. 'groupbyfunction' must "
+ "be empty or null.");
}
if (downsample) {
throw new WorkloadException("Data integrity was enabled. 'downsamplingfunction' must "
+ "be empty or null.");
}
if (queryTimeSpan > 0) {
throw new WorkloadException("Data integrity was enabled. 'querytimespan' must "
+ "be empty or 0.");
}
if (randomizeTimeseriesOrder) {
throw new WorkloadException("Data integrity was enabled. 'randomizetimeseriesorder' must "
+ "be false.");
}
final String startTimestamp = properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startTimestamp == null || startTimestamp.isEmpty()) {
throw new WorkloadException("Data integrity was enabled. 'insertstart' must "
+ "be set to a Unix Epoch timestamp.");
}
}
}
/**
* Thread state class holding thread local generators and indices.
*/
protected class ThreadState {
/** The timestamp generator for this thread. */
protected final UnixEpochTimestampGenerator timestampGenerator;
/** An offset generator to select a random offset for queries. */
protected final NumberGenerator queryOffsetGenerator;
/** The current write key index. */
protected int keyIdx;
/** The starting fence for writing keys. */
protected int keyIdxStart;
/** The ending fence for writing keys. */
protected int keyIdxEnd;
/** Indices for each tag value for writes. */
protected int[] tagValueIdxs;
/** Whether or not all time series have written values for the current timestamp. */
protected boolean rollover;
/** The starting timestamp. */
protected long startTimestamp;
/**
* Default ctor.
* @param threadID The zero based thread ID.
* @param threadCount The total number of threads.
* @throws WorkloadException If something went pear-shaped.
*/
protected ThreadState(final int threadID, final int threadCount) throws WorkloadException {
int totalThreads = threadCount > 0 ? threadCount : 1;
if (threadID >= totalThreads) {
throw new IllegalStateException("Thread ID " + threadID + " cannot be greater "
+ "than or equal than the thread count " + totalThreads);
}
if (keys.length < threadCount) {
throw new WorkloadException("Key count " + keys.length + " must be greater "
+ "than or equal to the thread count " + totalThreads);
}
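// Partition the key space evenly across threads; the last thread also
// picks up any remainder keys.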
int keysPerThread = keys.length / totalThreads;
keyIdx = keysPerThread * threadID;
keyIdxStart = keyIdx;
if (totalThreads - 1 == threadID) {
keyIdxEnd = keys.length;
} else {
keyIdxEnd = keyIdxStart + keysPerThread;
}
tagValueIdxs = new int[tagPairs]; // all zeros
final String startingTimestamp =
properties.getProperty(CoreWorkload.INSERT_START_PROPERTY);
if (startingTimestamp == null || startingTimestamp.isEmpty()) {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits, maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits);
} else {
try {
timestampGenerator = randomizeTimestampOrder ?
new RandomDiscreteTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp), maxOffsets) :
new UnixEpochTimestampGenerator(timestampInterval, timeUnits,
Long.parseLong(startingTimestamp));
} catch (NumberFormatException nfe) {
throw new WorkloadException("Unable to parse the " +
CoreWorkload.INSERT_START_PROPERTY, nfe);
}
}
// Set the last value properly for the timestamp, otherwise it may start
// one interval ago.
startTimestamp = timestampGenerator.nextValue();
// TODO - pick it
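// Presumably the upper bound of maxOffsets - 2 keeps a randomly chosen
// start offset strictly inside the written range so at least one later
// interval of data exists for the query.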
queryOffsetGenerator = new UniformLongGenerator(0, maxOffsets - 2);
}
/**
* Generates the next write value for the thread.
* @param map An initialized map to populate with tag keys and values as well
* as the timestamp and actual value.
* @param isInsert True for an insert, false for an update. Updates will pick
* an older timestamp (if random timestamp order isn't enabled).
* @return The next key to write.
*/
protected String nextDataPoint(final Map<String, ByteIterator> map, final boolean isInsert) {
final Random random = ThreadLocalRandom.current();
int iterations = sparsity <= 0 ? 1 : random.nextInt((int) ((double) perKeyCardinality * sparsity));
if (iterations < 1) {
iterations = 1;
}
while (true) {
iterations--;
if (rollover) {
timestampGenerator.nextValue();
rollover = false;
}
String key = null;
if (iterations <= 0) {
final TreeMap<String, String> validationTags;
if (dataintegrity) {
validationTags = new TreeMap<String, String>();
} else {
validationTags = null;
}
key = keys[keyIdx];
int overallIdx = keyIdx * cumulativeCardinality[0];
for (int i = 0; i < tagPairs; ++i) {
int tvidx = tagValueIdxs[i];
map.put(tagKeys[i], new StringByteIterator(tagValues[tvidx]));
if (dataintegrity) {
validationTags.put(tagKeys[i], tagValues[tvidx]);
}
if (delayedSeries > 0) {
overallIdx += (tvidx * cumulativeCardinality[i + 1]);
}
}
if (!isInsert) {
final long delta = (timestampGenerator.currentValue() - startTimestamp) / timestampInterval;
// Guard against Random#nextInt(0), which throws if no intervals have elapsed yet.
final int intervals = delta <= 0 ? 0 : random.nextInt((int) delta);
map.put(timestampKey, new NumericByteIterator(startTimestamp + (intervals * timestampInterval)));
} else if (delayedSeries > 0) {
// See if the series falls in a delay bucket and calculate an offset earlier
// than the current timestamp value if so.
double pct = (double) overallIdx / (double) totalCardinality;
if (pct < delayedSeries) {
int modulo = overallIdx % delayedIntervals;
if (modulo < 0) {
modulo *= -1;
}
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue() -
timestampInterval * modulo));
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
} else {
map.put(timestampKey, new NumericByteIterator(timestampGenerator.currentValue()));
}
if (dataintegrity) {
map.put(valueKey, new NumericByteIterator(validationFunction(key,
timestampGenerator.currentValue(), validationTags)));
} else {
switch (valueType) {
case INTEGERS:
map.put(valueKey, new NumericByteIterator(random.nextInt()));
break;
case FLOATS:
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
break;
case MIXED:
if (random.nextBoolean()) {
map.put(valueKey, new NumericByteIterator(random.nextInt()));
} else {
map.put(valueKey, new NumericByteIterator(random.nextDouble() * (double) 100000));
}
break;
default:
throw new IllegalStateException("Somehow we didn't have a value "
+ "type configured that we support: " + valueType);
}
}
}
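// Advance the tag value indices like an odometer, starting from the last
// (least significant) tag; once the lowest incrementable tag wraps, move
// on to the next time series key.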
boolean tagRollover = false;
for (int i = tagCardinality.length - 1; i >= 0; --i) {
if (tagCardinality[i] <= 1) {
tagRollover = true; // A cardinality of one never increments, so treat it as rolled over.
continue;
}
if (tagValueIdxs[i] + 1 >= tagCardinality[i]) {
tagValueIdxs[i] = 0;
if (i == firstIncrementableCardinality) {
tagRollover = true;
}
} else {
++tagValueIdxs[i];
break;
}
}
if (tagRollover) {
if (keyIdx + 1 >= keyIdxEnd) {
keyIdx = keyIdxStart;
rollover = true;
} else {
++keyIdx;
}
}
if (iterations <= 0) {
return key;
}
}
}
}
} |
if (properties == null) {
throw new WorkloadException("Workload has not been initialized.");
}
return new ThreadState(mythreadid, threadcount);
|