Dataset columns:

  proj_name      string, 110 distinct values
  relative_path  string, 40 to 228 chars
  class_name     string, 1 to 68 chars
  func_name      string, 1 to 98 chars
  masked_class   string, 58 to 2.52M chars
  func_body      string, 0 to 166k chars
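Each row pairs a Java class in which exactly one method body has been replaced by the sentinel <FILL_FUNCTION_BODY> (the masked_class field) with the ground-truth body of that method (the func_body field). A minimal sketch of how the two fields fit together, using a hand-typed row shaped like the first example below; the field values are abbreviated here, and obtaining real rows from wherever the dataset is hosted is left to the reader:

    # Hand-typed stand-in for one dataset row; masked_class is abbreviated.
    row = {
        "proj_name": "brianfrankcooper_YCSB",
        "relative_path": "YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java",
        "class_name": "AccumuloClient",
        "func_name": "run",
        "masked_class": "class AccumuloClient extends DB { ... void run() {<FILL_FUNCTION_BODY>} ... }",
        "func_body": "CleanUp.shutdownNow();",
    }

    # Splicing func_body back into masked_class at the sentinel recovers the
    # original, unmasked source of the class.
    full_source = row["masked_class"].replace("<FILL_FUNCTION_BODY>", row["func_body"])
    print(full_source)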

proj_name:     brianfrankcooper_YCSB
relative_path: YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
class_name:    AccumuloClient
func_name:     run
masked_class:
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() {<FILL_FUNCTION_BODY>} }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
func_body:

CleanUp.shutdownNow();
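Because each masked_class contains the sentinel exactly once, a row splits cleanly into a prefix/suffix pair around the missing body, the usual shape for fill-in-the-middle training. A hedged sketch of that split; the <PRE>/<SUF>/<MID> markers are illustrative placeholders, not tokens defined by this dataset:

    def to_fim_example(row):
        # Split the class source around the single masking sentinel.
        prefix, suffix = row["masked_class"].split("<FILL_FUNCTION_BODY>", 1)
        # Assemble an infill-style prompt; the target is the gold method body.
        prompt = "<PRE>" + prefix + "<SUF>" + suffix + "<MID>"
        return prompt, row["func_body"]

    # Tiny self-contained demo row.
    example = {
        "masked_class": "void run() {<FILL_FUNCTION_BODY>}",
        "func_body": "CleanUp.shutdownNow();",
    }
    prompt, target = to_fim_example(example)
    print(prompt)  # <PRE>void run() {<SUF>}<MID>
    print(target)  # CleanUp.shutdownNow();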

proj_name:     brianfrankcooper_YCSB
relative_path: YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
class_name:    AccumuloClient
func_name:     init
masked_class:
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException {<FILL_FUNCTION_BODY>} @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. 
* * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
func_body:

colFam = new Text(getProperties().getProperty("accumulo.columnFamily"));
colFamBytes = colFam.toString().getBytes(UTF_8);
inst = new ZooKeeperInstance(new ClientConfiguration()
    .withInstance(getProperties().getProperty("accumulo.instanceName"))
    .withZkHosts(getProperties().getProperty("accumulo.zooKeepers")));
try {
  String principal = getProperties().getProperty("accumulo.username");
  AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password"));
  connector = inst.getConnector(principal, token);
} catch (AccumuloException | AccumuloSecurityException e) {
  throw new DBException(e);
}
if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) {
  System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. "
      + "Please see YCSB issue #416 for work on adding a general solution to coordinated work.");
}

proj_name:     brianfrankcooper_YCSB
relative_path: YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
class_name:    AccumuloClient
func_name:     cleanup
masked_class:
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException {<FILL_FUNCTION_BODY>} /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. 
* * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
func_body:

try {
  Iterator<BatchWriter> iterator = writers.values().iterator();
  while (iterator.hasNext()) {
    BatchWriter writer = iterator.next();
    writer.close();
    iterator.remove();
  }
} catch (MutationsRejectedException e) {
  throw new DBException(e);
}

proj_name:     brianfrankcooper_YCSB
relative_path: YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
class_name:    AccumuloClient
func_name:     getWriter
masked_class:
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException {<FILL_FUNCTION_BODY>} /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. 
* * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
func_body:

// tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api.
// We know that YCSB is really only ever going to send us data for one table, so using
// a concurrent data structure is overkill (especially in such a hot code path).
// However, the impact seems to be relatively negligible in trivial local tests and it's
// "more correct" WRT to the API.
BatchWriter writer = writers.get(table);
if (null == writer) {
  BatchWriter newWriter = createBatchWriter(table);
  BatchWriter oldWriter = writers.putIfAbsent(table, newWriter);
  // Someone beat us to creating a BatchWriter for this table, use their BatchWriters
  if (null != oldWriter) {
    try {
      // Make sure to clean up our new batchwriter!
      newWriter.close();
    } catch (MutationsRejectedException e) {
      throw new RuntimeException(e);
    }
    writer = oldWriter;
  } else {
    writer = newWriter;
  }
}
return writer;

proj_name:     brianfrankcooper_YCSB
relative_path: YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
class_name:    AccumuloClient
func_name:     createBatchWriter
masked_class:
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException {<FILL_FUNCTION_BODY>} /** * Gets a scanner from Accumulo over one row. 
* * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
func_body:

BatchWriterConfig bwc = new BatchWriterConfig();
bwc.setMaxLatency(
    Long.parseLong(getProperties()
        .getProperty("accumulo.batchWriterMaxLatency", "30000")),
    TimeUnit.MILLISECONDS);
bwc.setMaxMemory(Long.parseLong(
    getProperties().getProperty("accumulo.batchWriterSize", "100000")));
final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads");
// Try to saturate the client machine.
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
if (null != numThreadsValue) {
  numThreads = Integer.parseInt(numThreadsValue);
}
System.err.println("Using " + numThreads + " threads to write data");
bwc.setMaxWriteThreads(numThreads);
return connector.createBatchWriter(table, bwc);

proj_name:     brianfrankcooper_YCSB
relative_path: YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
class_name:    AccumuloClient
func_name:     getRow
masked_class:
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException {<FILL_FUNCTION_BODY>} @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
func_body:

Scanner scanner = connector.createScanner(table, Authorizations.EMPTY);
scanner.setRange(new Range(row));
if (fields != null) {
  for (String field : fields) {
    scanner.fetchColumn(colFam, new Text(field));
  }
}
return scanner;

proj_name:     brianfrankcooper_YCSB
relative_path: YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
class_name:    AccumuloClient
func_name:     read
masked_class:
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
Scanner scanner = null;
try {
  // Restrict the scan to the requested fields; a null set fetches the whole row.
  scanner = getRow(table, new Text(key), fields);
  // Pick out the results we care about.
  final Text cq = new Text();
  for (Entry<Key, Value> entry : scanner) {
    entry.getKey().getColumnQualifier(cq);
    Value v = entry.getValue();
    byte[] buf = v.get();
    result.put(cq.toString(), new ByteArrayByteIterator(buf));
  }
} catch (Exception e) {
  System.err.println("Error trying to read Accumulo table " + table + " " + key);
  e.printStackTrace();
  return Status.ERROR;
} finally {
  if (null != scanner) {
    scanner.close();
  }
}
return Status.OK;
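One possible refinement, not present in this code: YCSB's Status also defines NOT_FOUND, so a read that matches no cells could report that instead of OK. A minimal sketch of the tail of read() under that assumption:

  // After the scan loop: nothing came back for this row.
  if (result.isEmpty()) {
    return Status.NOT_FOUND;  // distinguish a missing row from a successful read
  }
  return Status.OK;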
brianfrankcooper_YCSB
YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
AccumuloClient
scan
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. 
*/ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
// Just make the end 'infinity' and only read as much as we need.
Scanner scanner = null;
try {
  scanner = connector.createScanner(table, Authorizations.EMPTY);
  scanner.setRange(new Range(new Text(startkey), null));

  // Have Accumulo send us complete rows, serialized in a single Key-Value pair.
  IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class);
  scanner.addScanIterator(cfg);

  // If no fields are provided, we fetch every column in the row.
  if (fields != null) {
    // Otherwise, add each of them as a column we want.
    for (String field : fields) {
      scanner.fetchColumn(colFam, new Text(field));
    }
  }

  int count = 0;
  for (Entry<Key, Value> entry : scanner) {
    // Deserialize the row.
    SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue());
    HashMap<String, ByteIterator> rowData;
    if (null != fields) {
      rowData = new HashMap<>(fields.size());
    } else {
      rowData = new HashMap<>();
    }
    result.add(rowData);
    // Parse the data in the row, avoiding unnecessary Text object creation.
    final Text cq = new Text();
    for (Entry<Key, Value> rowEntry : row.entrySet()) {
      rowEntry.getKey().getColumnQualifier(cq);
      rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get()));
    }
    // Stop once exactly recordcount rows have been returned.
    if (++count >= recordcount) {
      break;
    }
  }
} catch (TableNotFoundException e) {
  System.err.println("Error trying to connect to Accumulo table.");
  e.printStackTrace();
  return Status.ERROR;
} catch (IOException e) {
  System.err.println("Error deserializing data from Accumulo.");
  e.printStackTrace();
  return Status.ERROR;
} finally {
  if (null != scanner) {
    scanner.close();
  }
}
return Status.OK;
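For reference, the range above is start-inclusive and unbounded on the right, so only the in-loop break bounds the scan. A hedged equivalence sketch (the row key is illustrative):

  // [startkey, +infinity): a null end row means no upper bound.
  Range unbounded = new Range(new Text("user100"), null);
  // Long form spelling out the inclusivity flags explicitly.
  Range explicit = new Range(new Text("user100"), true, null, false);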
brianfrankcooper_YCSB
YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
AccumuloClient
update
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
BatchWriter bw = null;
try {
  bw = getWriter(table);
} catch (TableNotFoundException e) {
  System.err.println("Error opening batch writer to Accumulo table " + table);
  e.printStackTrace();
  return Status.ERROR;
}

Mutation mutInsert = new Mutation(key.getBytes(UTF_8));
for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
  mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray());
}

try {
  bw.addMutation(mutInsert);
} catch (MutationsRejectedException e) {
  System.err.println("Error performing update.");
  e.printStackTrace();
  return Status.ERROR;
}

return Status.BATCHED_OK;
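BATCHED_OK signals that the mutation was only queued: the BatchWriter ships it when the configured latency or memory threshold trips, or at close. A hedged sketch of forcing durability early, reusing the bw reference above:

  try {
    bw.flush();  // push all queued mutations to the tablet servers now
  } catch (MutationsRejectedException e) {
    // Constraint violations and permission errors surface at flush time.
    e.printStackTrace();
  }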
brianfrankcooper_YCSB
YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
AccumuloClient
insert
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
return update(t, key, values);
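insert can safely delegate to update because Accumulo writes are upserts: a later write to the same row/family/qualifier shadows the earlier one, and the default VersioningIterator keeps only the newest version. A hedged illustration (key and values are made up, and distinct commit timestamps are assumed):

  Mutation first = new Mutation("user42".getBytes(UTF_8));
  first.put(colFamBytes, "field0".getBytes(UTF_8), "v1".getBytes(UTF_8));
  bw.addMutation(first);

  Mutation second = new Mutation("user42".getBytes(UTF_8));
  second.put(colFamBytes, "field0".getBytes(UTF_8), "v2".getBytes(UTF_8));
  bw.addMutation(second);
  // Once flushed, a read of field0 returns "v2" under the default
  // VersioningIterator (maxVersions = 1), assuming the second write
  // received the later timestamp.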
brianfrankcooper_YCSB
YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
AccumuloClient
delete
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) {<FILL_FUNCTION_BODY>} // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
BatchWriter bw;
try {
  bw = getWriter(table);
} catch (TableNotFoundException e) {
  System.err.println("Error trying to connect to Accumulo table.");
  e.printStackTrace();
  return Status.ERROR;
}

try {
  deleteRow(table, new Text(key), bw);
} catch (TableNotFoundException | MutationsRejectedException | RuntimeException e) {
  System.err.println("Error performing delete.");
  e.printStackTrace();
  return Status.ERROR;
}

return Status.OK;
brianfrankcooper_YCSB
YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
AccumuloClient
deleteRow
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } catch (RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException {<FILL_FUNCTION_BODY>} /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true deleter.putDelete(cf, cq); } bw.addMutation(deleter); } }
// TODO Use a BatchDeleter instead.
deleteRow(getRow(table, row, null), bw);
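The TODO could be realized with a BatchDeleter, which couples a multi-threaded batch scan with a BatchWriter and deletes every cell in the given ranges, avoiding the manual scan-then-mutate round trip. A minimal sketch under that assumption (the thread count and config are illustrative):

  BatchDeleter bd = connector.createBatchDeleter(table, Authorizations.EMPTY,
      2 /* query threads */, new BatchWriterConfig());
  try {
    bd.setRanges(Collections.singleton(new Range(row)));
    bd.delete();  // one pass: scan the range, write a delete for each cell
  } finally {
    bd.close();
  }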
brianfrankcooper_YCSB
YCSB/accumulo1.9/src/main/java/site/ycsb/db/accumulo/AccumuloClient.java
AccumuloClient
deleteRow
class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; private Text colFam = new Text(""); private byte[] colFamBytes = new byte[0]; private final ConcurrentHashMap<String, BatchWriter> writers = new ConcurrentHashMap<>(); static { Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { CleanUp.shutdownNow(); } }); } @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance(new ClientConfiguration() .withInstance(getProperties().getProperty("accumulo.instanceName")) .withZkHosts(getProperties().getProperty("accumulo.zooKeepers"))); try { String principal = getProperties().getProperty("accumulo.username"); AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } if (!(getProperties().getProperty("accumulo.pcFlag", "none").equals("none"))) { System.err.println("Sorry, the ZK based producer/consumer implementation has been removed. " + "Please see YCSB issue #416 for work on adding a general solution to coordinated work."); } } @Override public void cleanup() throws DBException { try { Iterator<BatchWriter> iterator = writers.values().iterator(); while (iterator.hasNext()) { BatchWriter writer = iterator.next(); writer.close(); iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * * @param table * The table to open. */ public BatchWriter getWriter(String table) throws TableNotFoundException { // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. // We know that YCSB is really only ever going to send us data for one table, so using // a concurrent data structure is overkill (especially in such a hot code path). // However, the impact seems to be relatively negligible in trivial local tests and it's // "more correct" WRT to the API. BatchWriter writer = writers.get(table); if (null == writer) { BatchWriter newWriter = createBatchWriter(table); BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); // Someone beat us to creating a BatchWriter for this table, use their BatchWriters if (null != oldWriter) { try { // Make sure to clean up our new batchwriter! newWriter.close(); } catch (MutationsRejectedException e) { throw new RuntimeException(e); } writer = oldWriter; } else { writer = newWriter; } } return writer; } /** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. 
int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); } /** * Gets a scanner from Accumulo over one row. * * @param row the row to scan * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException { Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } return scanner; } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Scanner scanner = null; try { scanner = getRow(table, new Text(key), null); // Pick out the results we care about. final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { System.err.println("Error trying to reading Accumulo table " + table + " " + key); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { // Just make the end 'infinity' and only read as much as we need. Scanner scanner = null; try { scanner = connector.createScanner(table, Authorizations.EMPTY); scanner.setRange(new Range(new Text(startkey), null)); // Have Accumulo send us complete rows, serialized in a single Key-Value pair IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); scanner.addScanIterator(cfg); // If no fields are provided, we assume one column/row. if (fields != null) { // And add each of them as fields we want. for (String field : fields) { scanner.fetchColumn(colFam, new Text(field)); } } int count = 0; for (Entry<Key, Value> entry : scanner) { // Deserialize the row SortedMap<Key, Value> row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); HashMap<String, ByteIterator> rowData; if (null != fields) { rowData = new HashMap<>(fields.size()); } else { rowData = new HashMap<>(); } result.add(rowData); // Parse the data in the row, avoid unnecessary Text object creation final Text cq = new Text(); for (Entry<Key, Value> rowEntry : row.entrySet()) { rowEntry.getKey().getColumnQualifier(cq); rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); } if (count++ == recordcount) { // Done reading the last row. 
break; } } } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } catch (IOException e) { System.err.println("Error deserializing data from Accumulo."); e.printStackTrace(); return Status.ERROR; } finally { if (null != scanner) { scanner.close(); } } return Status.OK; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { BatchWriter bw = null; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error opening batch writer to Accumulo table " + table); e.printStackTrace(); return Status.ERROR; } Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { bw.addMutation(mutInsert); } catch (MutationsRejectedException e) { System.err.println("Error performing update."); e.printStackTrace(); return Status.ERROR; } return Status.BATCHED_OK; } @Override public Status insert(String t, String key, Map<String, ByteIterator> values) { return update(t, key, values); } @Override public Status delete(String table, String key) { BatchWriter bw; try { bw = getWriter(table); } catch (TableNotFoundException e) { System.err.println("Error trying to connect to Accumulo table."); e.printStackTrace(); return Status.ERROR; } try { deleteRow(table, new Text(key), bw); } catch (TableNotFoundException | MutationsRejectedException | RuntimeException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; } return Status.OK; } // These functions are adapted from RowOperations.java: private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException { // TODO Use a batchDeleter instead deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException {<FILL_FUNCTION_BODY>} }
Mutation deleter = null; // iterate through the keys final Text row = new Text(); final Text cf = new Text(); final Text cq = new Text(); for (Entry<Key, Value> entry : scanner) { // create a mutation for the row if (deleter == null) { entry.getKey().getRow(row); deleter = new Mutation(row); } entry.getKey().getColumnFamily(cf); entry.getKey().getColumnQualifier(cq); // putDelete adds the key with the delete flag set to true deleter.putDelete(cf, cq); } // Guard against an empty scan: addMutation(null) would throw if the row was absent. if (deleter != null) { bw.addMutation(deleter); }
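The TODO in deleteRow points at Accumulo's BatchDeleter, which removes every entry matching a set of ranges without the client-side scan-then-mutate round trip. A minimal sketch of that alternative, assuming the class's existing connector field; the helper name and the fresh BatchWriterConfig are illustrative, not from the source, and BatchDeleter plus java.util.Collections would need to be imported:

    private void deleteRowWithBatchDeleter(String table, Text row)
        throws TableNotFoundException, MutationsRejectedException {
      // One query thread is plenty for a single-row range.
      BatchDeleter deleter = connector.createBatchDeleter(
          table, Authorizations.EMPTY, 1, new BatchWriterConfig());
      try {
        // Restrict the deletion to exactly the one row being removed.
        deleter.setRanges(Collections.singleton(new Range(row)));
        deleter.delete();
      } finally {
        deleter.close();
      }
    }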
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
init
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException {<FILL_FUNCTION_BODY>} @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try { if (!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); }
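The as.* properties read above arrive through YCSB's generic property plumbing; since site.ycsb.DB exposes setProperties/getProperties, the binding can also be exercised outside the workload runner. A sketch under that assumption (host and values are placeholders, not defaults from any real deployment):

    // Hypothetical standalone harness; property names match init() above.
    Properties props = new Properties();
    props.setProperty("as.host", "127.0.0.1");   // DEFAULT_HOST when omitted
    props.setProperty("as.port", "3000");        // DEFAULT_PORT when omitted
    props.setProperty("as.namespace", "ycsb");   // DEFAULT_NAMESPACE when omitted
    props.setProperty("as.timeout", "10000");    // applied to all four policies in init()

    AerospikeClient db = new AerospikeClient();
    db.setProperties(props);
    db.init();  // builds the underlying com.aerospike.client.AerospikeClient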
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
cleanup
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException {<FILL_FUNCTION_BODY>} @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try 
{ if (!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
client.close();
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
read
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try { if (!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; }
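Note that this read path casts every bin value to byte[], which holds only for records written through the binding's own write() helper (ByteIterator.toArray() stores raw bytes). A record loaded by another tool, e.g. with String bins, would throw ClassCastException here. A defensive variant of the copy loop, shown as a sketch rather than the binding's behavior:

    for (Map.Entry<String, Object> entry : record.bins.entrySet()) {
      Object value = entry.getValue();
      // Fall back to a UTF-8 encoding for bins that are not raw byte arrays.
      byte[] bytes = (value instanceof byte[])
          ? (byte[]) value
          : String.valueOf(value).getBytes(java.nio.charset.StandardCharsets.UTF_8);
      result.put(entry.getKey(), new ByteArrayByteIterator(bytes));
    }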
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
scan
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try { if (!client.delete(deletePolicy, new 
Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
System.err.println("Scan not implemented"); return Status.ERROR;
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
write
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try { if (!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; }
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
update
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) { try { if 
(!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
return write(table, key, updatePolicy, values);
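One semantic caveat: init() configures updatePolicy with RecordExistsAction.REPLACE_ONLY, which replaces the entire record and drops any bins absent from the mutation, while YCSB updates typically rewrite only a subset of a record's fields. If merge semantics were wanted instead, the client's UPDATE_ONLY action keeps untouched bins; sketched below as an alternative, not as what this binding does:

    // Alternative: merge the supplied bins into the existing record,
    // still failing if the key does not exist yet.
    updatePolicy.recordExistsAction = RecordExistsAction.UPDATE_ONLY;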
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
insert
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status delete(String table, String key) { try { if 
(!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; } } }
return write(table, key, insertPolicy, values);
brianfrankcooper_YCSB
YCSB/aerospike/src/main/java/site/ycsb/db/AerospikeClient.java
AerospikeClient
delete
class AerospikeClient extends site.ycsb.DB { private static final String DEFAULT_HOST = "localhost"; private static final String DEFAULT_PORT = "3000"; private static final String DEFAULT_TIMEOUT = "10000"; private static final String DEFAULT_NAMESPACE = "ycsb"; private String namespace = null; private com.aerospike.client.AerospikeClient client = null; private Policy readPolicy = new Policy(); private WritePolicy insertPolicy = new WritePolicy(); private WritePolicy updatePolicy = new WritePolicy(); private WritePolicy deletePolicy = new WritePolicy(); @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); namespace = props.getProperty("as.namespace", DEFAULT_NAMESPACE); String host = props.getProperty("as.host", DEFAULT_HOST); String user = props.getProperty("as.user"); String password = props.getProperty("as.password"); int port = Integer.parseInt(props.getProperty("as.port", DEFAULT_PORT)); int timeout = Integer.parseInt(props.getProperty("as.timeout", DEFAULT_TIMEOUT)); readPolicy.timeout = timeout; insertPolicy.timeout = timeout; updatePolicy.timeout = timeout; deletePolicy.timeout = timeout; ClientPolicy clientPolicy = new ClientPolicy(); if (user != null && password != null) { clientPolicy.user = user; clientPolicy.password = password; } try { client = new com.aerospike.client.AerospikeClient(clientPolicy, host, port); } catch (AerospikeException e) { throw new DBException(String.format("Error while creating Aerospike " + "client for %s:%d.", host, port), e); } } @Override public void cleanup() throws DBException { client.close(); } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { Record record; if (fields != null) { record = client.get(readPolicy, new Key(namespace, table, key), fields.toArray(new String[fields.size()])); } else { record = client.get(readPolicy, new Key(namespace, table, key)); } if (record == null) { System.err.println("Record key " + key + " not found (read)"); return Status.ERROR; } for (Map.Entry<String, Object> entry: record.bins.entrySet()) { result.put(entry.getKey(), new ByteArrayByteIterator((byte[])entry.getValue())); } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while reading key " + key + ": " + e); return Status.ERROR; } } @Override public Status scan(String table, String start, int count, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { System.err.println("Scan not implemented"); return Status.ERROR; } private Status write(String table, String key, WritePolicy writePolicy, Map<String, ByteIterator> values) { Bin[] bins = new Bin[values.size()]; int index = 0; for (Map.Entry<String, ByteIterator> entry: values.entrySet()) { bins[index] = new Bin(entry.getKey(), entry.getValue().toArray()); ++index; } Key keyObj = new Key(namespace, table, key); try { client.put(writePolicy, keyObj, bins); return Status.OK; } catch (AerospikeException e) { System.err.println("Error while writing key " + key + ": " + e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return write(table, key, insertPolicy, values); } @Override public Status delete(String table, String key) 
{<FILL_FUNCTION_BODY>} }
try { if (!client.delete(deletePolicy, new Key(namespace, table, key))) { System.err.println("Record key " + key + " not found (delete)"); return Status.ERROR; } return Status.OK; } catch (AerospikeException e) { System.err.println("Error while deleting key " + key + ": " + e); return Status.ERROR; }
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
init
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException {<FILL_FUNCTION_BODY>} /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. 
* @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. 
*/ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. * * @param resultMap * The map to fill/ * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! 
Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } }
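Protocol.valueOf(protocolStr) throws an unchecked IllegalArgumentException for anything other than the driver's exact enum constants (VST, HTTP_JSON, HTTP_VPACK in the 4.x Java driver), and that exception would escape init() uncaught. A more defensive parse of the same property, as a sketch:

    Protocol protocol;
    try {
      protocol = Protocol.valueOf(protocolStr.toUpperCase(java.util.Locale.ROOT));
    } catch (IllegalArgumentException e) {
      // Surface a checked, descriptive failure instead of an unchecked one.
      throw new DBException("Unknown arangodb.protocol: " + protocolStr
          + " (expected VST, HTTP_JSON, or HTTP_VPACK)", e);
    }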
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
cleanup
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException {<FILL_FUNCTION_BODY>} /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. 
* @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. 
See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill/ * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); }
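Note on the body above: it is the release half of a reference-counting idiom. Every per-thread init() increments INIT_COUNT, and only the cleanup() call that brings the count back to zero shuts the shared driver down. A minimal, self-contained sketch of the same idiom (the class and field names here are illustrative, not part of YCSB or the ArangoDB driver):

import java.util.concurrent.atomic.AtomicInteger;

public class SharedClientLifecycle {
  private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
  private static Object sharedClient; // stands in for the shared ArangoDB handle

  public static synchronized void init() {
    if (INIT_COUNT.getAndIncrement() == 0) {
      sharedClient = new Object(); // first caller in creates the shared resource
    }
  }

  public static void cleanup() {
    if (INIT_COUNT.decrementAndGet() == 0) {
      sharedClient = null; // last caller out releases it exactly once
    }
  }
}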
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
insert
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. 
*/ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return true if every value read had the expected format */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR;
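A hedged usage sketch (not part of the benchmark source) of how a caller might drive this insert(); the table name follows YCSB's default, while the key and field values are made up:

Properties props = new Properties(); // driver defaults apply: localhost:8529, VST, database "ycsb"
ArangoDBClient client = new ArangoDBClient();
client.setProperties(props);
client.init();

Map<String, ByteIterator> values = new HashMap<>();
values.put("field0", new StringByteIterator("value0"));
Status status = client.insert("usertable", "user1", values); // Status.OK on success

client.cleanup();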
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
read
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. 
*/ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return true if every value read had the expected format */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR;
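One caveat: with some versions of the Java driver, getDocument() returns null for a missing key rather than throwing, and fillMap() would then dereference null outside the ArangoDBException catch. A hedged, defensive variant of the try block (Status.NOT_FOUND is part of the YCSB core API; whether null is actually possible here depends on the driver version in use):

VPackSlice document = arangoDB.db(databaseName).collection(table)
    .getDocument(key, VPackSlice.class, null);
if (document == null) {
  return Status.NOT_FOUND; // absent key, not a hard error
}
return this.fillMap(result, document, fields) ? Status.OK : Status.ERROR;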
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
update
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. 
See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } // Replace the last ',' with a newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. * * @param resultMap * The map to fill * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return true if every value read had the expected format */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR;
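To make the transactional branch concrete: for a single-field update of field0 to "value0" with waitForSync left at false, the string assembled above via mapToJson() and String.format() renders roughly as the server-side JavaScript below, invoked with the document handle (e.g. "usertable/user1") bound to id. The exact JSON whitespace depends on how the driver serializes the VPack slice:

function (id) {var db = require('internal').db;db._update(id, {"field0":"value0"}, true, false, false);}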
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
delete
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) {<FILL_FUNCTION_BODY>} /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return true if every value read had the expected format */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR;
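As written, deleting a nonexistent key surfaces as Status.ERROR. ArangoDB reports error number 1202 for a missing document, so a caller that wants that distinction could rewrite the catch clause as below; this is a sketch that assumes the driver's ArangoDBException exposes getErrorNum() returning a possibly-null Integer:

} catch (ArangoDBException e) {
  if (e.getErrorNum() != null && e.getErrorNum() == 1202) { // 1202: document not found
    return Status.NOT_FOUND;
  }
  logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString());
}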
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
scan
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set of field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } // Replace the last ',' with a newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. * * @param resultMap * The map to fill * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return true if every value read had the expected format */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the expected format! Actual class is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR;
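Note: the scan body above interpolates the collection name and record count into the AQL text but passes the start key as a bind variable. A minimal sketch of the query it composes, assuming a hypothetical collection "usertable", a record count of 2, and hypothetical fields "field0" and "field1" (attribute order follows the Set's iteration order):

    // Rough equivalent of what String.format + constructReturnForAQL produce:
    String aql =
        "FOR target IN usertable FILTER target._key >= @key SORT target._key ASC "
            + "LIMIT 2 RETURN {\n"
            + "\"field0\" : target.field0,\n"
            + "\"field1\" : target.field1\n"
            + "} ";
    // @key is bound to the start key ("user42", say) via MapBuilder, so the key
    // is never concatenated into the query text; with fields == null the
    // RETURN clause is simply "target", i.e. the whole document.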
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
createDocumentHandle
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException {<FILL_FUNCTION_BODY>} private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill. * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
validateCollectionName(collection); return collection + "/" + documentKey;
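Note: the handle built here is what the transactional update path hands to the server-side function as its id parameter. A minimal sketch with hypothetical names:

    String handle = createDocumentHandle("usertable", "user42"); // "usertable/user42"
    // In update() with transactionUpdate == true, this handle is passed via
    // options.params(...) and consumed by db._update(id, ..., waitForSync).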
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
validateCollectionName
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException {<FILL_FUNCTION_BODY>} private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill. * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); }
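Note: the '/' check exists because a slash inside a collection name would make the resulting "collection/key" handle ambiguous. Illustrative calls with hypothetical names:

    validateCollectionName("usertable");  // returns normally
    validateCollectionName("user/table"); // throws ArangoDBException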
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
constructReturnForAQL
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) {<FILL_FUNCTION_BODY>} private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. * * @param resultMap * The map to fill/ * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! 
Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
// Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes;
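Note: with a null or empty field set the method returns the target name itself (RETURN target, the full document); otherwise it builds a projection whose trailing comma is overwritten with a newline by setCharAt. A sketch with hypothetical fields (LinkedHashSet used only to make the order deterministic):

    Set<String> fields = new LinkedHashSet<>(Arrays.asList("field0", "field1"));
    String projection = constructReturnForAQL(fields, "target");
    // projection:
    // {
    // "field0" : target.field0,
    // "field1" : target.field1
    // }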
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
fillMap
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) {<FILL_FUNCTION_BODY>} /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill. * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
return fillMap(resultMap, document, null);
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
fillMap
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill. * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return true if the map was filled successfully, false otherwise */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) {<FILL_FUNCTION_BODY>} private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
if (fields == null || fields.isEmpty()) {
  for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) {
    Entry<String, VPackSlice> next = iterator.next();
    VPackSlice value = next.getValue();
    if (value.isString()) {
      resultMap.put(next.getKey(), stringToByteIterator(value.getAsString()));
    } else if (!value.isCustom()) {
      logger.error("Unexpected attribute type for '{}': {}", next.getKey(), value.getClass().getName());
      return false;
    }
  }
} else {
  for (String field : fields) {
    VPackSlice value = document.get(field);
    if (value.isString()) {
      resultMap.put(field, stringToByteIterator(value.getAsString()));
    } else if (!value.isCustom()) {
      logger.error("Unexpected attribute type for '{}': {}", field, value.getClass().getName());
      return false;
    }
  }
}
return true;
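For orientation, here is a minimal, self-contained sketch of the document walk that the `fields == null` branch of fillMap performs. This is not part of the record above; the class name FillMapSketch is invented for illustration, and the VPackBuilder calls mirror the ones already used in mapToJson.

import com.arangodb.velocypack.VPackBuilder;
import com.arangodb.velocypack.VPackSlice;
import com.arangodb.velocypack.ValueType;
import java.util.Iterator;
import java.util.Map;

public final class FillMapSketch {
  public static void main(String[] args) {
    // Build the kind of object document that fillMap consumes.
    VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT);
    builder.add("field0", "value0");
    builder.add("field1", "value1");
    builder.close();
    VPackSlice document = builder.slice();

    // The fields == null branch of fillMap, reduced to its core walk:
    // copy every string-typed attribute, skipping custom types like _id.
    for (Iterator<Map.Entry<String, VPackSlice>> it = document.objectIterator(); it.hasNext();) {
      Map.Entry<String, VPackSlice> entry = it.next();
      if (entry.getValue().isString()) {
        System.out.println(entry.getKey() + " -> " + entry.getValue().getAsString());
      }
    }
  }
}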
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
byteIteratorToString
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill/ * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) {<FILL_FUNCTION_BODY>} private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
// Note: new String(byte[]) decodes with the JVM default charset; a
// charset-explicit variant is sketched below.
return new String(byteIter.toArray());
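A caveat worth making explicit: new String(byte[]) decodes with the platform default charset, so results can vary across JVMs. A charset-explicit variant is a one-line change; the fragment below is a hedged sketch of such a variant, not how the binding is actually written.

import java.nio.charset.StandardCharsets;

// Hypothetical charset-safe variant of the helper above.
private String byteIteratorToString(ByteIterator byteIter) {
  return new String(byteIter.toArray(), StandardCharsets.UTF_8);
}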
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
stringToByteIterator
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill/ * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) {<FILL_FUNCTION_BODY>} private String mapToJson(Map<String, ByteIterator> values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class); } }
return new StringByteIterator(content);
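Taken together, the two helpers are meant to be inverses. A tiny round-trip check makes that contract concrete; the class name RoundTripSketch is invented, and the assertion assumes the same charset is used on both sides (see the charset note above).

import site.ycsb.ByteIterator;
import site.ycsb.StringByteIterator;

public final class RoundTripSketch {
  public static void main(String[] args) {
    ByteIterator it = new StringByteIterator("value0");
    // Mirrors byteIteratorToString(stringToByteIterator("value0")).
    String roundTripped = new String(it.toArray());
    System.out.println("value0".equals(roundTripped)); // true on a UTF-8 default-charset JVM
  }
}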
brianfrankcooper_YCSB
YCSB/arangodb/src/main/java/site/ycsb/db/arangodb/ArangoDBClient.java
ArangoDBClient
mapToJson
class ArangoDBClient extends DB { private static Logger logger = LoggerFactory.getLogger(ArangoDBClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); /** ArangoDB Driver related, Singleton. */ private ArangoDB arangoDB; private String databaseName = "ycsb"; private String collectionName; private Boolean dropDBBeforeRun; private Boolean waitForSync = false; private Boolean transactionUpdate = false; /** * Initialize any state for this DB. Called once per DB instance; there is * one DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void init() throws DBException { synchronized (ArangoDBClient.class) { Properties props = getProperties(); collectionName = props.getProperty("table", "usertable"); // Set the DB address String ip = props.getProperty("arangodb.ip", "localhost"); String portStr = props.getProperty("arangodb.port", "8529"); int port = Integer.parseInt(portStr); // Set network protocol String protocolStr = props.getProperty("arangodb.protocol", "VST"); Protocol protocol = Protocol.valueOf(protocolStr); // If clear db before run String dropDBBeforeRunStr = props.getProperty("arangodb.dropDBBeforeRun", "false"); dropDBBeforeRun = Boolean.parseBoolean(dropDBBeforeRunStr); // Set the sync mode String waitForSyncStr = props.getProperty("arangodb.waitForSync", "false"); waitForSync = Boolean.parseBoolean(waitForSyncStr); // Set if transaction for update String transactionUpdateStr = props.getProperty("arangodb.transactionUpdate", "false"); transactionUpdate = Boolean.parseBoolean(transactionUpdateStr); // Init ArangoDB connection try { arangoDB = new ArangoDB.Builder().host(ip).port(port).useProtocol(protocol).build(); } catch (Exception e) { logger.error("Failed to initialize ArangoDB", e); System.exit(-1); } if(INIT_COUNT.getAndIncrement() == 0) { // Init the database if (dropDBBeforeRun) { // Try delete first try { arangoDB.db(databaseName).drop(); } catch (ArangoDBException e) { logger.info("Fail to delete DB: {}", databaseName); } } try { arangoDB.createDatabase(databaseName); logger.info("Database created: " + databaseName); } catch (ArangoDBException e) { logger.error("Failed to create database: {} with ex: {}", databaseName, e.toString()); } try { arangoDB.db(databaseName).createCollection(collectionName); logger.info("Collection created: " + collectionName); } catch (ArangoDBException e) { logger.error("Failed to create collection: {} with ex: {}", collectionName, e.toString()); } logger.info("ArangoDB client connection created to {}:{}", ip, port); // Log the configuration logger.info("Arango Configuration: dropDBBeforeRun: {}; address: {}:{}; databaseName: {};" + " waitForSync: {}; transactionUpdate: {};", dropDBBeforeRun, ip, port, databaseName, waitForSync, transactionUpdate); } } } /** * Cleanup any state for this DB. Called once per DB instance; there is one * DB instance per client thread. * * Actually, one client process will share one DB instance here.(Coincide to * mongoDB driver) */ @Override public void cleanup() throws DBException { if (INIT_COUNT.decrementAndGet() == 0) { arangoDB.shutdown(); arangoDB = null; logger.info("Local cleaned up."); } } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { toInsert.addAttribute(entry.getKey(), byteIteratorToString(entry.getValue())); } DocumentCreateOptions options = new DocumentCreateOptions().waitForSync(waitForSync); arangoDB.db(databaseName).collection(table).insertDocument(toInsert, options); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying insert {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Read a record from the database. Each field/value pair from the result * will be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error or "not found". */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { return Status.ERROR; } return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying read {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error. See this class's * description for a discussion of error codes. */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); for (Entry<String, ByteIterator> field : values.entrySet()) { updateDoc.addAttribute(field.getKey(), byteIteratorToString(field.getValue())); } arangoDB.db(databaseName).collection(table).updateDocument(key, updateDoc); return Status.OK; } else { // id for documentHandle String transactionAction = "function (id) {" // use internal database functions + "var db = require('internal').db;" // collection.update(document, data, overwrite, keepNull, waitForSync) + String.format("db._update(id, %s, true, false, %s);}", mapToJson(values), Boolean.toString(waitForSync).toLowerCase()); TransactionOptions options = new TransactionOptions(); options.writeCollections(table); options.params(createDocumentHandle(table, key)); arangoDB.db(databaseName).transaction(transactionAction, Void.class, options); return Status.OK; } } catch (ArangoDBException e) { logger.error("Exception while trying update {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Delete a record from the database. 
* * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status delete(String table, String key) { try { arangoDB.db(databaseName).collection(table).deleteDocument(key); return Status.OK; } catch (ArangoDBException e) { logger.error("Exception while trying delete {} {} with ex {}", table, key, e.toString()); } return Status.ERROR; } /** * Perform a range scan for a set of records in the database. Each * field/value pair from the result will be stored in a HashMap. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error. See the * {@link DB} class's description for a discussion of error codes. */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { ArangoCursor<VPackSlice> cursor = null; try { String aqlQuery = String.format( "FOR target IN %s FILTER target._key >= @key SORT target._key ASC LIMIT %d RETURN %s ", table, recordcount, constructReturnForAQL(fields, "target")); Map<String, Object> bindVars = new MapBuilder().put("key", startkey).get(); cursor = arangoDB.db(databaseName).query(aqlQuery, bindVars, null, VPackSlice.class); while (cursor.hasNext()) { VPackSlice aDocument = cursor.next(); HashMap<String, ByteIterator> aMap = new HashMap<String, ByteIterator>(aDocument.size()); if (!this.fillMap(aMap, aDocument)) { return Status.ERROR; } result.add(aMap); } return Status.OK; } catch (Exception e) { logger.error("Exception while trying scan {} {} {} with ex {}", table, startkey, recordcount, e.toString()); } finally { if (cursor != null) { try { cursor.close(); } catch (IOException e) { logger.error("Fail to close cursor", e); } } } return Status.ERROR; } private String createDocumentHandle(String collection, String documentKey) throws ArangoDBException { validateCollectionName(collection); return collection + "/" + documentKey; } private void validateCollectionName(String name) throws ArangoDBException { if (name.indexOf('/') != -1) { throw new ArangoDBException("does not allow '/' in name."); } } private String constructReturnForAQL(Set<String> fields, String targetName) { // Construct the AQL query string. String resultDes = targetName; if (fields != null && fields.size() != 0) { StringBuilder builder = new StringBuilder("{"); for (String field : fields) { builder.append(String.format("\n\"%s\" : %s.%s,", field, targetName, field)); } //Replace last ',' to newline. builder.setCharAt(builder.length() - 1, '\n'); builder.append("}"); resultDes = builder.toString(); } return resultDes; } private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document) { return fillMap(resultMap, document, null); } /** * Fills the map with the properties from the BaseDocument. 
* * @param resultMap * The map to fill/ * @param document * The record to read from * @param fields * The list of fields to read, or null for all of them * @return isSuccess */ private boolean fillMap(Map<String, ByteIterator> resultMap, VPackSlice document, Set<String> fields) { if (fields == null || fields.size() == 0) { for (Iterator<Entry<String, VPackSlice>> iterator = document.objectIterator(); iterator.hasNext();) { Entry<String, VPackSlice> next = iterator.next(); VPackSlice value = next.getValue(); if (value.isString()) { resultMap.put(next.getKey(), stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } else { for (String field : fields) { VPackSlice value = document.get(field); if (value.isString()) { resultMap.put(field, stringToByteIterator(value.getAsString())); } else if (!value.isCustom()) { logger.error("Error! Not the format expected! Actually is {}", value.getClass().getName()); return false; } } } return true; } private String byteIteratorToString(ByteIterator byteIter) { return new String(byteIter.toArray()); } private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } private String mapToJson(Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} }
VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); } builder.close(); return arangoDB.util().deserialize(builder.slice(), String.class);
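To see why mapToJson exists at all: the transactional path in update() splices its output into a server-side JavaScript action as an object literal. A rough sketch of the resulting action string, using a placeholder field name and assuming waitForSync is false:

// Hypothetical illustration: for values {"field0": "value0"} and
// waitForSync == false, update() builds roughly this action string:
String transactionAction =
    "function (id) {"
    + "var db = require('internal').db;"
    + "db._update(id, {\"field0\":\"value0\"}, true, false, false);}";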
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
init
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException {<FILL_FUNCTION_BODY>} @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. 
But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false")
    .toLowerCase().equals("true")) {
  clientSideBuffering = true;
}
if (getProperties().getProperty(DURABILITY_PROPERTY, "true")
    .toLowerCase().equals("false")) {
  durability = false;
}
final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY);
if (columnFamily == null || columnFamily.isEmpty()) {
  System.err.println("Error, must specify a columnfamily for HBase table");
  throw new DBException("No columnfamily specified");
}
columnFamilyBytes = columnFamily.getBytes();
if ((getProperties().getProperty("debug") != null)
    && (getProperties().getProperty("debug").equals("true"))) {
  debug = true;
}
joinTimeout = Integer.parseInt(getProperties().getProperty(
    JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT));
final boolean prefetchMeta = getProperties()
    .getProperty(PREFETCH_META_PROPERTY, "false")
    .toLowerCase().equals("true");
try {
  synchronized (MUTEX) {
    ++threadCount;
    if (client == null) {
      final String configPath = getProperties().getProperty(CONFIG_PROPERTY);
      final Config config;
      if (configPath == null || configPath.isEmpty()) {
        config = new Config();
        final Iterator<Entry<Object, Object>> iterator = getProperties()
            .entrySet().iterator();
        while (iterator.hasNext()) {
          final Entry<Object, Object> property = iterator.next();
          config.overrideConfig((String) property.getKey(),
              (String) property.getValue());
        }
      } else {
        config = new Config(configPath);
      }
      client = new HBaseClient(config);

      // Terminate right now if table does not exist, since the client
      // will not propagate this error upstream once the workload
      // starts.
      String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT);
      try {
        client.ensureTableExists(table).join(joinTimeout);
      } catch (InterruptedException e1) {
        Thread.currentThread().interrupt();
      } catch (Exception e) {
        throw new DBException(e);
      }
      if (prefetchMeta) {
        try {
          if (debug) {
            System.out.println("Starting meta prefetch for table " + table);
          }
          client.prefetchMeta(table).join(joinTimeout);
          if (debug) {
            System.out.println("Completed meta prefetch for table " + table);
          }
        } catch (InterruptedException e) {
          System.err.println("Interrupted during prefetch");
          Thread.currentThread().interrupt();
        } catch (Exception e) {
          throw new DBException("Failed prefetch", e);
        }
      }
    }
  }
} catch (IOException e) {
  throw new DBException("Failed instantiation of client", e);
}
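Since init() drives everything off YCSB properties, a compact reference of the knobs it reads can help. The sketch below only prints a property set; every key is taken from the init() body above, while hbase.zookeeper.quorum and its value are assumptions about what one would typically forward to AsyncHBase via config.overrideConfig(). The class name AsyncHBaseConfigSketch is invented.

import java.util.Properties;

public final class AsyncHBaseConfigSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("columnfamily", "family");   // required; init() throws DBException without it
    props.setProperty("clientbuffering", "false"); // disable buffering to measure per-op latency
    props.setProperty("durability", "true");       // "false" bypasses the WAL
    props.setProperty("jointimeout", "30000");     // ms to wait on Deferred.join()
    props.setProperty("prefetchmeta", "false");    // optionally warm the META cache up front
    // With no "config" file path set, init() forwards every property to
    // Config.overrideConfig(); the quorum key below is an assumed example.
    props.setProperty("hbase.zookeeper.quorum", "localhost");
    props.forEach((k, v) -> System.out.println(k + "=" + v));
  }
}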
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
cleanup
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException {<FILL_FUNCTION_BODY>} @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
synchronized (MUTEX) {
      --threadCount;
      if (client != null && threadCount < 1) {
        try {
          if (debug) {
            System.out.println("Shutting down client");
          }
          client.shutdown().joinUninterruptibly(joinTimeout);
        } catch (Exception e) {
          System.err.println("Failed to shut down the AsyncHBase client "
              + "properly: " + e.getMessage());
        }
        client = null;
      }
    }
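The init()/cleanup() pair above implements a reference-counted singleton: every worker thread increments threadCount under MUTEX during init(), and the last thread to call cleanup() shuts the shared HBaseClient down. A minimal sketch of the same idiom in isolation follows; ExpensiveClient and its methods are hypothetical stand-ins for illustration, not AsyncHBase APIs.

// Stub standing in for an expensive, thread-safe client such as HBaseClient.
final class ExpensiveClient {
  void shutdown() { /* flush buffers, release sockets, ... */ }
}

// Hypothetical illustration of the reference-counted shared-client idiom.
final class SharedResource {
  private static final Object MUTEX = new Object();
  private static int refCount = 0;
  private static ExpensiveClient client;  // shared by all worker threads

  static ExpensiveClient acquire() {      // called from each thread's init()
    synchronized (MUTEX) {
      ++refCount;
      if (client == null) {
        client = new ExpensiveClient();   // built exactly once
      }
      return client;
    }
  }

  static void release() {                 // called from each thread's cleanup()
    synchronized (MUTEX) {
      if (--refCount < 1 && client != null) {
        client.shutdown();                // last one out shuts it down
        client = null;
      }
    }
  }
}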
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
read
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR;
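One subtlety in the read path: key.getBytes() and new String(column.qualifier()) use the JVM's platform default charset, while scan() encodes its start key with the explicit UTF8_CHARSET. On a host whose default charset is not UTF-8, the two conversions can disagree. A minimal sketch of the explicit-charset form, assuming UTF-8 is the intended encoding throughout (the Utf8 helper class is hypothetical):

import java.nio.charset.StandardCharsets;

// Explicit-charset helpers; equivalent to the default-charset calls in read()
// only when the platform default happens to be UTF-8.
final class Utf8 {
  static byte[] toBytes(final String s) {
    return s.getBytes(StandardCharsets.UTF_8);
  }

  static String fromBytes(final byte[] b) {
    return new String(b, StandardCharsets.UTF_8);
  }
}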
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
scan
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. 
*/ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
setTable(table);
    final Scanner scanner = client.newScanner(lastTableBytes);
    scanner.setFamily(columnFamilyBytes);
    scanner.setStartKey(startkey.getBytes(UTF8_CHARSET));
    // No end key... *sniff*
    if (fields != null) {
      scanner.setQualifiers(getQualifierList(fields));
    }

    // no filters? *sniff*
    ArrayList<ArrayList<KeyValue>> rows = null;
    try {
      int numResults = 0;
      scanloop:
      while ((rows = scanner.nextRows().join(joinTimeout)) != null) {
        for (final ArrayList<KeyValue> row : rows) {
          final HashMap<String, ByteIterator> rowResult =
              new HashMap<String, ByteIterator>(row.size());
          for (final KeyValue column : row) {
            rowResult.put(new String(column.qualifier()),
                // TODO - do we need to clone this array? YCSB may keep it in memory
                // for a while which would mean the entire KV would hang out and won't
                // be GC'd.
                new ByteArrayByteIterator(column.value()));
            if (debug) {
              System.out.println("Got scan result for key: " +
                  Bytes.pretty(column.key()));
            }
          }
          result.add(rowResult);
          numResults++;
          if (numResults >= recordcount) { // hit the recordcount; bail out of both loops
            break scanloop;
          }
        }
      }
      scanner.close().join(joinTimeout);
      return Status.OK;
    } catch (InterruptedException e) {
      System.err.println("Thread interrupted");
      Thread.currentThread().interrupt();
    } catch (Exception e) {
      System.err.println("Failure scanning rows starting at key " + startkey +
          ": " + e.getMessage());
      return Status.ERROR;
    }
    return Status.ERROR;
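The scan above runs until the scanner is exhausted unless recordcount stops it first; the "No end key... *sniff*" comment flags exactly that gap. If bounding the scan server-side is acceptable, AsyncHBase's Scanner also accepts a stop key. A hedged sketch, where the stop-key choice and the helper name are hypothetical examples rather than the benchmark's logic:

import java.nio.charset.StandardCharsets;
import org.hbase.async.HBaseClient;
import org.hbase.async.Scanner;

final class BoundedScanExample {
  // Configures a scanner bounded on both ends; the stop key is exclusive.
  static Scanner boundedScanner(final HBaseClient client, final byte[] table,
                                final byte[] family, final String startKey,
                                final String stopKey) {
    final Scanner scanner = client.newScanner(table);
    scanner.setFamily(family);
    scanner.setStartKey(startKey.getBytes(StandardCharsets.UTF_8));
    scanner.setStopKey(stopKey.getBytes(StandardCharsets.UTF_8));
    return scanner;
  }
}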
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
update
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
setTable(table);
    if (debug) {
      System.out.println("Setting up put for key: " + key);
    }

    final byte[][] qualifiers = new byte[values.size()][];
    final byte[][] byteValues = new byte[values.size()][];

    int idx = 0;
    for (final Entry<String, ByteIterator> entry : values.entrySet()) {
      qualifiers[idx] = entry.getKey().getBytes();
      byteValues[idx++] = entry.getValue().toArray();
      if (debug) {
        System.out.println("Adding field/value " + entry.getKey() + "/"
            + Bytes.pretty(entry.getValue().toArray()) + " to put request");
      }
    }

    final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(),
        columnFamilyBytes, qualifiers, byteValues);
    if (!durability) {
      put.setDurable(false);
    }

    if (!clientSideBuffering) {
      put.setBufferable(false);
      try {
        client.put(put).join(joinTimeout);
      } catch (InterruptedException e) {
        System.err.println("Thread interrupted");
        Thread.currentThread().interrupt();
      } catch (Exception e) {
        System.err.println("Failure writing to row with key " + key +
            ": " + e.getMessage());
        return Status.ERROR;
      }
    } else {
      // Asynchronous, buffered write. Without a callback (and an async
      // YCSB interface) we have no way to know whether it succeeded.
      client.put(put);
    }
    return Status.OK;
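When clientSideBuffering is enabled, the put above is fire-and-forget and, as the comment notes, its outcome is invisible. YCSB's synchronous DB interface leaves nowhere to report an asynchronous failure, but an errback attached to the returned Deferred can at least surface errors in the logs. A sketch of that option, assuming stderr logging is acceptable; the helper class name is hypothetical:

import com.stumbleupon.async.Callback;
import com.stumbleupon.async.Deferred;
import org.hbase.async.HBaseClient;
import org.hbase.async.PutRequest;

final class BufferedPutExample {
  // Fire-and-forget put that still surfaces failures in the logs.
  static void bufferedPut(final HBaseClient client, final PutRequest put) {
    final Deferred<Object> d = client.put(put);
    d.addErrback(new Callback<Object, Exception>() {
      @Override
      public Object call(final Exception e) {
        System.err.println("Buffered put failed: " + e.getMessage());
        return null; // nothing upstream can act on the failure
      }
    });
  }
}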
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
insert
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
return update(table, key, values);
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
delete
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) {// if hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) {<FILL_FUNCTION_BODY>} /** * Little helper to set the table byte array. If it's different than the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
setTable(table);
    if (debug) {
      System.out.println("Doing delete for key: " + key);
    }

    final DeleteRequest delete = new DeleteRequest(
        lastTableBytes, key.getBytes(), columnFamilyBytes);
    if (!durability) {
      delete.setDurable(false);
    }

    if (!clientSideBuffering) {
      delete.setBufferable(false);
      try {
        client.delete(delete).join(joinTimeout);
      } catch (InterruptedException e) {
        System.err.println("Thread interrupted");
        Thread.currentThread().interrupt();
      } catch (Exception e) {
        System.err.println("Failure deleting row with key " + key +
            ": " + e.getMessage());
        return Status.ERROR;
      }
    } else {
      // Asynchronous, buffered delete. Without a callback (and an async
      // YCSB interface) we have no way to know whether it succeeded.
      client.delete(delete);
    }
    return Status.OK;
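Both write paths (update and delete) toggle the same two per-request flags: setDurable(false) skips the write-ahead log, which is faster but can lose mutations if a region server crashes, and setBufferable(false) sends the RPC immediately instead of letting AsyncHBase coalesce it, which is what makes per-operation latencies measurable. These correspond to the "durability" and "clientbuffering" properties parsed in init(). A small sketch of the mapping, with a hypothetical helper name; only methods already used in this class appear:

import org.hbase.async.DeleteRequest;

final class RequestFlagsExample {
  // Applies the benchmark's durability/buffering knobs to a delete RPC.
  static DeleteRequest configure(final DeleteRequest rpc,
                                 final boolean durable,
                                 final boolean buffered) {
    if (!durable) {
      rpc.setDurable(false);    // skip the WAL: speed over crash safety
    }
    if (!buffered) {
      rpc.setBufferable(false); // send now: per-op latency stays measurable
    }
    return rpc;
  }
}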
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
setTable
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Use for tracking running thread counts so we know when to shutdown the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family use for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of this */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true") ? true : false; try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) { // if we hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure writing to row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure deleting row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different from the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) {<FILL_FUNCTION_BODY>} /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) { final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers; } }
if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); }
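Because a YCSB run drives a single table, setTable() amounts to a one-entry memo of the String-to-byte[] conversion, re-encoding only when the table name changes. A minimal standalone sketch of the same idea (the class and method names here are invented for illustration, not part of the binding):

// Hypothetical sketch of setTable()'s caching idea.
final class LastTableCache {
  private String lastTable = "";
  private byte[] lastTableBytes = new byte[0];

  byte[] bytesFor(final String table) {
    if (!lastTable.equals(table)) { // re-encode only on a table switch
      lastTable = table;
      lastTableBytes = table.getBytes();
    }
    return lastTableBytes;
  }
}

No synchronization is needed because YCSB creates one DB instance per client thread, so the cache is never shared.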
brianfrankcooper_YCSB
YCSB/asynchbase/src/main/java/site/ycsb/db/AsyncHBaseClient.java
AsyncHBaseClient
getQualifierList
class AsyncHBaseClient extends site.ycsb.DB { public static final Charset UTF8_CHARSET = Charset.forName("UTF8"); private static final String CLIENT_SIDE_BUFFERING_PROPERTY = "clientbuffering"; private static final String DURABILITY_PROPERTY = "durability"; private static final String PREFETCH_META_PROPERTY = "prefetchmeta"; private static final String CONFIG_PROPERTY = "config"; private static final String COLUMN_FAMILY_PROPERTY = "columnfamily"; private static final String JOIN_TIMEOUT_PROPERTY = "jointimeout"; private static final String JOIN_TIMEOUT_PROPERTY_DEFAULT = "30000"; /** Mutex for instantiating a single instance of the client. */ private static final Object MUTEX = new Object(); /** Used for tracking running thread counts so we know when to shut down the client. */ private static int threadCount = 0; /** The client that's used for all threads. */ private static HBaseClient client; /** Print debug information to standard out. */ private boolean debug = false; /** The column family used for the workload. */ private byte[] columnFamilyBytes; /** Cache for the last table name/ID to avoid byte conversions. */ private String lastTable = ""; private byte[] lastTableBytes; private long joinTimeout; /** Whether or not to bypass the WAL for puts and deletes. */ private boolean durability = true; /** * If true, buffer mutations on the client. This is the default behavior for * AsyncHBase. For measuring insert/update/delete latencies, client side * buffering should be disabled. * * A single instance of the client is shared by all threads. */ private boolean clientSideBuffering = false; @Override public void init() throws DBException { if (getProperties().getProperty(CLIENT_SIDE_BUFFERING_PROPERTY, "false") .toLowerCase().equals("true")) { clientSideBuffering = true; } if (getProperties().getProperty(DURABILITY_PROPERTY, "true") .toLowerCase().equals("false")) { durability = false; } final String columnFamily = getProperties().getProperty(COLUMN_FAMILY_PROPERTY); if (columnFamily == null || columnFamily.isEmpty()) { System.err.println("Error, must specify a columnfamily for HBase table"); throw new DBException("No columnfamily specified"); } columnFamilyBytes = columnFamily.getBytes(); if ((getProperties().getProperty("debug") != null) && (getProperties().getProperty("debug").compareTo("true") == 0)) { debug = true; } joinTimeout = Integer.parseInt(getProperties().getProperty( JOIN_TIMEOUT_PROPERTY, JOIN_TIMEOUT_PROPERTY_DEFAULT)); final boolean prefetchMeta = getProperties() .getProperty(PREFETCH_META_PROPERTY, "false") .toLowerCase().equals("true"); try { synchronized (MUTEX) { ++threadCount; if (client == null) { final String configPath = getProperties().getProperty(CONFIG_PROPERTY); final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); } } else { config = new Config(configPath); } client = new HBaseClient(config); // Terminate right now if table does not exist, since the client // will not propagate this error upstream once the workload // starts. 
String table = getProperties().getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); try { client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException(e); } if (prefetchMeta) { try { if (debug) { System.out.println("Starting meta prefetch for table " + table); } client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table); } } catch (InterruptedException e) { System.err.println("Interrupted during prefetch"); Thread.currentThread().interrupt(); } catch (Exception e) { throw new DBException("Failed prefetch", e); } } } } } catch (IOException e) { throw new DBException("Failed instantiation of client", e); } } @Override public void cleanup() throws DBException { synchronized (MUTEX) { --threadCount; if (client != null && threadCount < 1) { try { if (debug) { System.out.println("Shutting down client"); } client.shutdown().joinUninterruptibly(joinTimeout); } catch (Exception e) { System.err.println("Failed to shutdown the AsyncHBase client " + "properly: " + e.getMessage()); } client = null; } } } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); } try { if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); } final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; } // got something so populate the results for (final KeyValue column : row) { result.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. new ByteArrayByteIterator(column.value())); if (debug) { System.out.println( "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value())); } } return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + key + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); // No end key... *sniff* if (fields != null) { scanner.setQualifiers(getQualifierList(fields)); } // no filters? *sniff* ArrayList<ArrayList<KeyValue>> rows = null; try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), // TODO - do we need to clone this array? YCSB may keep it in memory // for a while which would mean the entire KV would hang out and won't // be GC'd. 
new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); } } result.add(rowResult); numResults++; if (numResults >= recordcount) { // if we hit recordcount, bail out break; } } } scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure reading from row with key " + startkey + ": " + e.getMessage()); return Status.ERROR; } return Status.ERROR; } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { setTable(table); if (debug) { System.out.println("Setting up put for key: " + key); } final byte[][] qualifiers = new byte[values.size()][]; final byte[][] byteValues = new byte[values.size()][]; int idx = 0; for (final Entry<String, ByteIterator> entry : values.entrySet()) { qualifiers[idx] = entry.getKey().getBytes(); byteValues[idx++] = entry.getValue().toArray(); if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); } } final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); } if (!clientSideBuffering) { put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure writing to row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.put(put); } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { return update(table, key, values); } @Override public Status delete(String table, String key) { setTable(table); if (debug) { System.out.println("Doing delete for key: " + key); } final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); } if (!clientSideBuffering) { delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); Thread.currentThread().interrupt(); } catch (Exception e) { System.err.println("Failure deleting row with key " + key + ": " + e.getMessage()); return Status.ERROR; } } else { // hooray! Asynchronous write. But without a callback and an async // YCSB call we don't know whether it succeeded or not client.delete(delete); } return Status.OK; } /** * Little helper to set the table byte array. If it's different from the last * table we reset the byte array. Otherwise we just use the existing array. * @param table The table we're operating against */ private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; lastTableBytes = table.getBytes(); } } /** * Little helper to build a qualifier byte array from a field set. * @param fields The fields to fetch. * @return The column qualifier byte arrays. */ private byte[][] getQualifierList(final Set<String> fields) {<FILL_FUNCTION_BODY>} }
final byte[][] qualifiers = new byte[fields.size()][]; int idx = 0; for (final String field : fields) { qualifiers[idx++] = field.getBytes(); } return qualifiers;
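Note that getQualifierList() emits qualifiers in the Set's iteration order, so a deterministic implementation such as LinkedHashSet yields a stable qualifier array. A small hypothetical driver (not part of the binding) showing the mapping from a YCSB field set to HBase column qualifiers:

import java.util.LinkedHashSet;
import java.util.Set;

// Illustrative only: each field name becomes one byte[] qualifier.
public final class QualifierListDemo {
  public static void main(final String[] args) {
    final Set<String> fields = new LinkedHashSet<>();
    fields.add("field0");
    fields.add("field9");
    final byte[][] qualifiers = new byte[fields.size()][];
    int idx = 0;
    for (final String field : fields) {
      qualifiers[idx++] = field.getBytes();
    }
    System.out.println("built " + qualifiers.length + " qualifiers"); // built 2 qualifiers
  }
}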
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
init
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException {<FILL_FUNCTION_BODY>} private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. 
DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
* * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } 
result.append("r['").append(field).append("'] "); } return result.toString(); } } }
INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } }
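init() and cleanup() together implement a reference-counted singleton: every thread bumps INIT_COUNT, only the first thread through the synchronized block constructs the shared CosmosClient, and the last thread out closes it. A stripped-down sketch of that lifecycle under invented names (the real binding closes a CosmosClient rather than nulling an Object):

import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical distillation of the shared-client lifecycle.
final class SharedClient {
  private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
  private static Object client; // stands in for the expensive client

  static void init() {
    INIT_COUNT.incrementAndGet();
    synchronized (INIT_COUNT) {
      if (client == null) {
        client = new Object(); // expensive construction happens once
      }
    }
  }

  static void cleanup() {
    synchronized (INIT_COUNT) {
      if (INIT_COUNT.decrementAndGet() <= 0 && client != null) {
        client = null; // the real binding calls client.close() here
      }
    }
  }
}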
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
initAzureCosmosClient
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException {<FILL_FUNCTION_BODY>} private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), 
pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. 
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } result.append("r['").append(field).append("'] "); } return result.toString(); } } }
// Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. 
GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); }
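The body above is driven entirely by YCSB properties. A hypothetical illustration of the knobs it reads (the property names are taken verbatim from the calls above; the values are placeholders, and unset numeric options default to -1, which leaves the SDK defaults in place):

import java.util.Properties;

// Illustrative configuration for the azurecosmos binding.
public final class CosmosBindingProps {
  public static void main(final String[] args) {
    final Properties p = new Properties();
    p.setProperty("azurecosmos.uri", "https://<account>.documents.azure.com:443/");
    p.setProperty("azurecosmos.primaryKey", "<primary-key>");
    p.setProperty("azurecosmos.databaseName", "ycsb");       // default: "ycsb"
    p.setProperty("azurecosmos.useGateway", "false");         // direct mode unless "true"
    p.setProperty("azurecosmos.consistencyLevel", "SESSION"); // must name a ConsistencyLevel constant
    p.setProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", "9");
    System.out.println(p);
  }
}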
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
getStringProperty
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { 
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) {<FILL_FUNCTION_BODY>} private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
* * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } 
result.append("r['").append(field).append("'] "); } return result.toString(); } } }
return getProperties().getProperty(propertyName, defaultValue);
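For context, a minimal sketch of how this string helper resolves configured versus defaulted values. It is illustrative only: the account URL is hypothetical, and plain java.util.Properties stands in for YCSB's property plumbing (which is normally populated from -p command-line flags).

import java.util.Properties;

public class PropertyLookupSketch {
  public static void main(String[] args) {
    // Emulates what a run with -p azurecosmos.uri=... would put into getProperties().
    Properties props = new Properties();
    props.setProperty("azurecosmos.uri", "https://example-account.documents.azure.com:443/"); // hypothetical endpoint
    // Present key: the configured value wins.
    System.out.println(props.getProperty("azurecosmos.uri", null));
    // Absent key: the caller-supplied default wins, matching DEFAULT_DATABASE_NAME above.
    System.out.println(props.getProperty("azurecosmos.databaseName", "ycsb")); // prints "ycsb"
  }
}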
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
getBooleanProperty
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { 
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) {<FILL_FUNCTION_BODY>} private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * * @param table The name of the table * @param startkey The record key of the first record to read. 
* @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } 
result.append("r['").append(field).append("'] "); } return result.toString(); } } }
String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal);
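One subtlety of this helper, shown in the sketch below (illustrative only, using java.util.Properties in place of YCSB's property plumbing): Boolean.parseBoolean returns true only for the literal string "true" (case-insensitive), so values like "yes" or "1" silently resolve to false rather than raising an error.

import java.util.Properties;

public class BooleanPropertySketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("azurecosmos.useGateway", "TRUE"); // case-insensitive match -> true
    props.setProperty("azurecosmos.useUpsert", "yes");   // not "true" -> false, no error raised
    System.out.println(Boolean.parseBoolean(props.getProperty("azurecosmos.useGateway"))); // true
    System.out.println(Boolean.parseBoolean(props.getProperty("azurecosmos.useUpsert")));  // false
  }
}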
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
getIntProperty
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { 
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) {<FILL_FUNCTION_BODY>} /** * Cleanup any state for this DB. 
Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * * @param table The name of the table * @param startkey The record key of the first record to read. 
* @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } 
result.append("r['").append(field).append("'] "); } return result.toString(); } } }
String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; }
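Worth noting about this helper: a malformed value does not fail the run; the NumberFormatException is swallowed and the default is returned, which is how initAzureCosmosClient can rely on -1 as a "not configured" sentinel. A small illustrative sketch (property values are made up for the example):

import java.util.Properties;

public class IntPropertySketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("azurecosmos.preferredPageSize", "100");
    props.setProperty("azurecosmos.maxBufferedItemCount", "not-a-number");
    // Well-formed value parses normally.
    System.out.println(Integer.parseInt(props.getProperty("azurecosmos.preferredPageSize"))); // 100
    try {
      Integer.parseInt(props.getProperty("azurecosmos.maxBufferedItemCount"));
    } catch (NumberFormatException e) {
      // In the helper this exception is swallowed and the default (e.g. -1) is returned.
      System.out.println("malformed value silently falls back to the default");
    }
  }
}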
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
cleanup
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { 
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException {<FILL_FUNCTION_BODY>} /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * * @param table The name of the table * @param startkey The record key of the first record to read. 
* @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } 
result.append("r['").append(field).append("'] "); } return result.toString(); } } }
synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } }
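The init/cleanup pair above implements reference counting over a process-wide client: every DB instance bumps INIT_COUNT, and only the last cleanup() closes the shared connection. A self-contained sketch of that lifecycle, assuming a hypothetical AutoCloseable stand-in for the real CosmosClient:

import java.util.concurrent.atomic.AtomicInteger;

public class RefCountedClientSketch {
  private static final AtomicInteger INIT_COUNT = new AtomicInteger(0);
  private static AutoCloseable client; // stand-in for the shared CosmosClient

  static void init() {
    INIT_COUNT.incrementAndGet();
    synchronized (INIT_COUNT) {
      if (client == null) {
        client = () -> System.out.println("client closed"); // hypothetical client creation
      }
    }
  }

  static void cleanup() throws Exception {
    synchronized (INIT_COUNT) {
      // Only the last caller actually closes the shared client.
      if (INIT_COUNT.decrementAndGet() <= 0 && client != null) {
        try {
          client.close();
        } finally {
          client = null;
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    init(); init(); // two client threads share one connection
    cleanup();      // no-op: one user remains
    cleanup();      // prints "client closed"
  }
}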
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
read
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { 
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * * @param table The name of the table * @param startkey The record key of the first record to read. 
* @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set of * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement.
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } 
result.append("r['").append(field).append("'] "); } return result.toString(); } } }
try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; }
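A note on the read implementation above: unlike every other operation in this class, its catch block ignores the azurecosmos.includeExceptionStackInLog setting (the stack trace is always logged) and maps every CosmosException to Status.NOT_FOUND, so a throttled or timed-out read is reported the same way as a genuinely missing document. A minimal sketch of a stricter translation, assuming only the azure-cosmos v4 CosmosException.getStatusCode() accessor; the helper name is hypothetical and not part of the binding:

// Hypothetical helper: translate a failed readItem() into a YCSB Status.
// Only HTTP 404 means the document is absent; 429 (throttled), 408 (request
// timeout) and similar codes are operational failures, not missing data.
private static Status readStatusFor(CosmosException e) {
  return e.getStatusCode() == 404 ? Status.NOT_FOUND : Status.ERROR;
}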
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
scan
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to tear down on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) {
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
* * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set of * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } result.append("r['").append(field).append("'] "); } return result.toString(); } } }
try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR;
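For reference, the scan above issues one potentially cross-partition SQL query (hence the maxDegreeOfParallelism option): createSelectTop() builds the projection and the WHERE clause keeps every id at or after the start key. Tracing the string concatenation for a hypothetical two-field scan of ten records gives the text below; the field order follows the Set's iteration order, and the doubled spaces are harmless to the Cosmos SQL parser:

// Illustration only: the query text produced for a two-field, 10-record scan.
// @startkey is not inlined; it is bound separately through SqlParameter.
Set<String> fields = new HashSet<>(Arrays.asList("field0", "field1"));
String sql = createSelectTop(fields, 10) + " FROM root r WHERE r.id >= @startkey";
// sql: "SELECT TOP 10 r['field0'] , r['field1']  FROM root r WHERE r.id >= @startkey"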
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
update
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to tear down on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) {
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
* * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set of * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert.
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } result.append("r['").append(field).append("'] "); } return result.toString(); } } }
String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR;
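The update above is an optimistic-concurrency loop: read the document, capture the ETag from the response, overwrite the changed fields, and send the replace with setIfMatchETag() so it fails if another writer got in between; the outer loop then retries up to NUM_UPDATE_ATTEMPTS times. As a possible refinement, sketched under the assumption that CosmosException.getStatusCode() is available (the helper name is hypothetical), only an HTTP 412 actually signals a lost ETag race worth retrying:

// Hypothetical helper: a 412 (precondition failed) means the If-Match ETag no
// longer matched because a concurrent writer replaced the document first; a
// re-read yields a fresh ETag, so retrying can succeed. Most other status
// codes will fail identically on every attempt, so retrying just wastes time.
private static boolean isRetryableUpdateFailure(CosmosException e) {
  return e.getStatusCode() == 412;
}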
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
insert
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to tear down on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (uri == null || uri.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) {
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
* * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set of * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement.
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } result.append("r['").append(field).append("'] "); } return result.toString(); } } }
if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR;
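A side note on the container lookup that this insert body (and every other operation in the class) repeats: the get()/put() pair is racy but benign, since two threads can both miss and the later put merely replaces one container handle with an equivalent one. A minimal sketch of an atomic variant, assuming the containerCache and database statics declared in this class (the underlying map is a ConcurrentHashMap, so computeIfAbsent runs the mapping function at most once per missing key):

// Sketch only: atomic replacement for the get()/put() cache idiom above.
private CosmosContainer getContainer(String table) {
  return AzureCosmosClient.containerCache.computeIfAbsent(
      table, t -> AzureCosmosClient.database.getContainer(t));
}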
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
delete
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { 
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
* * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) {<FILL_FUNCTION_BODY>} private String createSelectTop(Set<String> fields, int top) { if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } result.append("r['").append(field).append("'] "); } return result.toString(); } } }
if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR;
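One behavioral wrinkle in the delete body above: every failure, including a document that does not exist, maps to Status.ERROR. If distinguishing the missing-document case is useful, CosmosException exposes the HTTP status code in the azure-cosmos v4 SDK; a hedged sketch (the helper method is ours, not part of the original class):

// Sketch, not original behavior: report a missing document as NOT_FOUND.
// getStatusCode() is on CosmosException; 404 means the item was absent.
private Status mapDeleteFailure(CosmosException e) {
  return e.getStatusCode() == 404 ? Status.NOT_FOUND : Status.ERROR;
}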
brianfrankcooper_YCSB
YCSB/azurecosmos/src/main/java/site/ycsb/db/AzureCosmosClient.java
AzureCosmosClient
createSelectTop
class AzureCosmosClient extends DB { protected static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); // Default configuration values private static final ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = ConsistencyLevel.SESSION; private static final String DEFAULT_DATABASE_NAME = "ycsb"; private static final boolean DEFAULT_USE_GATEWAY = false; private static final boolean DEFAULT_USE_UPSERT = false; private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = -1; private static final int DEFAULT_MAX_BUFFERED_ITEM_COUNT = 0; private static final int DEFAULT_PREFERRED_PAGE_SIZE = -1; public static final int NUM_UPDATE_ATTEMPTS = 4; private static final boolean DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG = false; private static final String DEFAULT_USER_AGENT = "azurecosmos-ycsb"; private static final Logger LOGGER = LoggerFactory.getLogger(AzureCosmosClient.class); /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static CosmosClient client; private static CosmosDatabase database; private static String databaseName; private static boolean useUpsert; private static int maxDegreeOfParallelism; private static int maxBufferedItemCount; private static int preferredPageSize; private static boolean includeExceptionStackInLog; private static Map<String, CosmosContainer> containerCache; private static String userAgent; @Override public void init() throws DBException { INIT_COUNT.incrementAndGet(); synchronized (INIT_COUNT) { if (client != null) { return; } try { initAzureCosmosClient(); } catch (Exception e) { throw new DBException(e); } } } private void initAzureCosmosClient() throws DBException { // Connection properties String primaryKey = this.getStringProperty("azurecosmos.primaryKey", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing primary key required to connect to the database."); } String uri = this.getStringProperty("azurecosmos.uri", null); if (primaryKey == null || primaryKey.isEmpty()) { throw new DBException("Missing uri required to connect to the database."); } AzureCosmosClient.userAgent = this.getStringProperty("azurecosmos.userAgent", DEFAULT_USER_AGENT); AzureCosmosClient.useUpsert = this.getBooleanProperty("azurecosmos.useUpsert", DEFAULT_USE_UPSERT); AzureCosmosClient.databaseName = this.getStringProperty("azurecosmos.databaseName", DEFAULT_DATABASE_NAME); AzureCosmosClient.maxDegreeOfParallelism = this.getIntProperty("azurecosmos.maxDegreeOfParallelism", DEFAULT_MAX_DEGREE_OF_PARALLELISM); AzureCosmosClient.maxBufferedItemCount = this.getIntProperty("azurecosmos.maxBufferedItemCount", DEFAULT_MAX_BUFFERED_ITEM_COUNT); AzureCosmosClient.preferredPageSize = this.getIntProperty("azurecosmos.preferredPageSize", DEFAULT_PREFERRED_PAGE_SIZE); AzureCosmosClient.includeExceptionStackInLog = this.getBooleanProperty("azurecosmos.includeExceptionStackInLog", DEFAULT_INCLUDE_EXCEPTION_STACK_IN_LOG); ConsistencyLevel consistencyLevel = ConsistencyLevel.valueOf( this.getStringProperty("azurecosmos.consistencyLevel", DEFAULT_CONSISTENCY_LEVEL.toString().toUpperCase())); boolean useGateway = this.getBooleanProperty("azurecosmos.useGateway", DEFAULT_USE_GATEWAY); ThrottlingRetryOptions retryOptions = new ThrottlingRetryOptions(); int maxRetryAttemptsOnThrottledRequests = this.getIntProperty("azurecosmos.maxRetryAttemptsOnThrottledRequests", -1); if (maxRetryAttemptsOnThrottledRequests != -1) { 
retryOptions.setMaxRetryAttemptsOnThrottledRequests(maxRetryAttemptsOnThrottledRequests); } // Direct connection config options. DirectConnectionConfig directConnectionConfig = new DirectConnectionConfig(); int directMaxConnectionsPerEndpoint = this.getIntProperty("azurecosmos.directMaxConnectionsPerEndpoint", -1); if (directMaxConnectionsPerEndpoint != -1) { directConnectionConfig.setMaxConnectionsPerEndpoint(directMaxConnectionsPerEndpoint); } int directIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.directIdleConnectionTimeoutInSeconds", -1); if (directIdleConnectionTimeoutInSeconds != -1) { directConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(directIdleConnectionTimeoutInSeconds)); } // Gateway connection config options. GatewayConnectionConfig gatewayConnectionConfig = new GatewayConnectionConfig(); int gatewayMaxConnectionPoolSize = this.getIntProperty("azurecosmos.gatewayMaxConnectionPoolSize", -1); if (gatewayMaxConnectionPoolSize != -1) { gatewayConnectionConfig.setMaxConnectionPoolSize(gatewayMaxConnectionPoolSize); } int gatewayIdleConnectionTimeoutInSeconds = this.getIntProperty("azurecosmos.gatewayIdleConnectionTimeoutInSeconds", -1); if (gatewayIdleConnectionTimeoutInSeconds != -1) { gatewayConnectionConfig.setIdleConnectionTimeout(Duration.ofSeconds(gatewayIdleConnectionTimeoutInSeconds)); } try { LOGGER.info( "Creating Cosmos DB client {}, useGateway={}, consistencyLevel={}," + " maxRetryAttemptsOnThrottledRequests={}, maxRetryWaitTimeInSeconds={}" + " useUpsert={}, maxDegreeOfParallelism={}, maxBufferedItemCount={}, preferredPageSize={}", uri, useGateway, consistencyLevel.toString(), retryOptions.getMaxRetryAttemptsOnThrottledRequests(), retryOptions.getMaxRetryWaitTime().toMillis() / 1000, AzureCosmosClient.useUpsert, AzureCosmosClient.maxDegreeOfParallelism, AzureCosmosClient.maxBufferedItemCount, AzureCosmosClient.preferredPageSize); CosmosClientBuilder builder = new CosmosClientBuilder().endpoint(uri).key(primaryKey) .throttlingRetryOptions(retryOptions).consistencyLevel(consistencyLevel).userAgentSuffix(userAgent); if (useGateway) { builder = builder.gatewayMode(gatewayConnectionConfig); } else { builder = builder.directMode(directConnectionConfig); } AzureCosmosClient.client = builder.buildClient(); LOGGER.info("Azure Cosmos DB connection created to {}", uri); } catch (IllegalArgumentException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException("Illegal argument passed in. 
Check the format of your parameters.", e); } AzureCosmosClient.containerCache = new ConcurrentHashMap<>(); // Verify the database exists try { AzureCosmosClient.database = AzureCosmosClient.client.getDatabase(databaseName); AzureCosmosClient.database.read(); } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } throw new DBException( "Invalid database name (" + AzureCosmosClient.databaseName + ") or failed to read database.", e); } } private String getStringProperty(String propertyName, String defaultValue) { return getProperties().getProperty(propertyName, defaultValue); } private boolean getBooleanProperty(String propertyName, boolean defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } return Boolean.parseBoolean(stringVal); } private int getIntProperty(String propertyName, int defaultValue) { String stringVal = getProperties().getProperty(propertyName, null); if (stringVal == null) { return defaultValue; } try { return Integer.parseInt(stringVal); } catch (NumberFormatException e) { return defaultValue; } } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { if (INIT_COUNT.decrementAndGet() <= 0 && AzureCosmosClient.client != null) { try { AzureCosmosClient.client.close(); } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Could not close DocumentClient", e); } finally { AzureCosmosClient.client = null; } } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); ObjectNode node = response.getItem(); Map<String, String> stringResults = new HashMap<>(node.size()); if (fields == null) { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } StringByteIterator.putAllAsByteIterators(result, stringResults); } else { Iterator<Map.Entry<String, JsonNode>> iter = node.fields(); while (iter.hasNext()) { Entry<String, JsonNode> pair = iter.next(); if (fields.contains(pair.getKey())) { stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } } StringByteIterator.putAllAsByteIterators(result, stringResults); } return Status.OK; } catch (CosmosException e) { LOGGER.error("Failed to read key {} in collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); return Status.NOT_FOUND; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. 
* * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set * field/value pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { CosmosQueryRequestOptions queryOptions = new CosmosQueryRequestOptions(); queryOptions.setMaxDegreeOfParallelism(AzureCosmosClient.maxDegreeOfParallelism); queryOptions.setMaxBufferedItemCount(AzureCosmosClient.maxBufferedItemCount); CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } List<SqlParameter> paramList = new ArrayList<>(); paramList.add(new SqlParameter("@startkey", startkey)); SqlQuerySpec querySpec = new SqlQuerySpec( this.createSelectTop(fields, recordcount) + " FROM root r WHERE r.id >= @startkey", paramList); CosmosPagedIterable<ObjectNode> pagedIterable = container.queryItems(querySpec, queryOptions, ObjectNode.class); Iterator<FeedResponse<ObjectNode>> pageIterator = pagedIterable .iterableByPage(AzureCosmosClient.preferredPageSize).iterator(); while (pageIterator.hasNext()) { List<ObjectNode> pageDocs = pageIterator.next().getResults(); for (ObjectNode doc : pageDocs) { Map<String, String> stringResults = new HashMap<>(doc.size()); Iterator<Map.Entry<String, JsonNode>> nodeIterator = doc.fields(); while (nodeIterator.hasNext()) { Entry<String, JsonNode> pair = nodeIterator.next(); stringResults.put(pair.getKey().toString(), pair.getValue().toString()); } HashMap<String, ByteIterator> byteResults = new HashMap<>(doc.size()); StringByteIterator.putAllAsByteIterators(byteResults, stringResults); result.add(byteResults); } } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to query key {} from collection {} in database {}", startkey, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key, * overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { String readEtag = ""; // Azure Cosmos DB does not have patch support. Until then, we need to read // the document, update it, and then write it back. // This could be made more efficient by using a stored procedure // and doing the read/modify write on the server side. Perhaps // that will be a future improvement. 
for (int attempt = 0; attempt < NUM_UPDATE_ATTEMPTS; attempt++) { try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } CosmosItemResponse<ObjectNode> response = container.readItem(key, new PartitionKey(key), ObjectNode.class); readEtag = response.getETag(); ObjectNode node = response.getItem(); for (Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } CosmosItemRequestOptions requestOptions = new CosmosItemRequestOptions(); requestOptions.setIfMatchETag(readEtag); PartitionKey pk = new PartitionKey(key); container.replaceItem(node, key, pk, requestOptions); return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to update key {} to collection {} in database {} on attempt {}", key, table, AzureCosmosClient.databaseName, attempt, e); } } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Insert key: {} into table: {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } PartitionKey pk = new PartitionKey(key); ObjectNode node = OBJECT_MAPPER.createObjectNode(); node.put("id", key); for (Map.Entry<String, ByteIterator> pair : values.entrySet()) { node.put(pair.getKey(), pair.getValue().toString()); } if (AzureCosmosClient.useUpsert) { container.upsertItem(node, pk, new CosmosItemRequestOptions()); } else { container.createItem(node, pk, new CosmosItemRequestOptions()); } return Status.OK; } catch (CosmosException e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to insert key {} to collection {} in database {}", key, table, AzureCosmosClient.databaseName, e); } return Status.ERROR; } @Override public Status delete(String table, String key) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Delete key {} from table {}", key, table); } try { CosmosContainer container = AzureCosmosClient.containerCache.get(table); if (container == null) { container = AzureCosmosClient.database.getContainer(table); AzureCosmosClient.containerCache.put(table, container); } container.deleteItem(key, new PartitionKey(key), new CosmosItemRequestOptions()); return Status.OK; } catch (Exception e) { if (!AzureCosmosClient.includeExceptionStackInLog) { e = null; } LOGGER.error("Failed to delete key {} in collection {}", key, table, e); } return Status.ERROR; } private String createSelectTop(Set<String> fields, int top) {<FILL_FUNCTION_BODY>} }
if (fields == null) { return "SELECT TOP " + top + " * "; } else { StringBuilder result = new StringBuilder("SELECT TOP ").append(top).append(" "); int initLength = result.length(); for (String field : fields) { if (result.length() != initLength) { result.append(", "); } result.append("r['").append(field).append("'] "); } return result.toString(); }
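For reference, these are the strings createSelectTop emits; the space before each comma comes from appending "'] " before ", ". A hypothetical sanity check, not part of the source (LinkedHashSet pins the field iteration order so the expected output is deterministic; assumes java.util imports):

// Hypothetical test of the projection builder.
Set<String> fields = new LinkedHashSet<>(Arrays.asList("field0", "field1"));
assert createSelectTop(null, 10).equals("SELECT TOP 10 * ");
assert createSelectTop(fields, 10).equals("SELECT TOP 10 r['field0'] , r['field1'] ");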
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
init
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException {<FILL_FUNCTION_BODY>} @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with 
projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (!"https".equals(protocol) && !"http".equals(protocol)) { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); }
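The protocol check rewritten above matters because != on Strings compares object identity in Java, not contents; a value read from a properties file is a distinct object even when its characters match a literal, so the original comparison could reject a perfectly valid "https" unless the string happened to be interned. A two-line demonstration:

String fromFile = new String("https");        // same contents, different object
System.out.println("https" == fromFile);      // false: identity comparison
System.out.println("https".equals(fromFile)); // true: content comparison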
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
cleanup
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() {<FILL_FUNCTION_BODY>} @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { 
return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); 
BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
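cleanup() above is a no-op, so when azure.batchsize > 1 any entities still buffered in the static BATCH_OPERATION when a client thread finishes are silently dropped. A hedged sketch of a flushing cleanup, reusing only fields already declared in this class (DB.cleanup may throw DBException, as seen elsewhere in this document):

@Override
public void cleanup() throws DBException {
  // Flush the partially filled batch so trailing inserts are not lost.
  if (curIdx > 0) {
    try {
      cloudTable.execute(BATCH_OPERATION);
      BATCH_OPERATION.clear();
      curIdx = 0;
    } catch (Exception e) {
      throw new DBException("Could not flush the final batch.\n", e);
    }
  }
}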
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
read
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> 
values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); 
BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
if (fields != null) {
  return readSubset(key, fields, result);
} else {
  return readEntity(key, result);
}
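The read path is a straight null-check dispatch: a caller that passes a field set gets the projection query, anything else gets a whole-entity point lookup. A minimal driver sketch follows, assuming it compiles alongside AzureClient and that the account is reachable; the class name, account name, and base64 key below are all hypothetical placeholders.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import site.ycsb.ByteIterator;
import site.ycsb.Status;

public final class ReadDispatchSketch {
  public static void main(String[] args) throws Exception {
    AzureClient client = new AzureClient();
    Properties props = new Properties();
    props.setProperty("azure.account", "myaccount"); // placeholder
    props.setProperty("azure.key", "bXlrZXk=");      // placeholder, not a real key
    client.setProperties(props);
    client.init(); // fails here unless the account actually exists

    Map<String, ByteIterator> result = new HashMap<String, ByteIterator>();
    // fields == null -> readEntity(): fetch every column of the row.
    Status full = client.read("usertable", "user0", null, result);

    // non-null fields -> readSubset(): server-side projection of two columns.
    Set<String> fields = new HashSet<String>();
    fields.add("field0");
    fields.add("field1");
    Status subset = client.read("usertable", "user0", fields, result);
    System.out.println(full + " / " + subset);
  }
}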
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
scan
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String 
protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
try {
  // Range scan: fix the partition, then walk RowKeys from startkey upward.
  String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')",
      partitionKey, startkey);
  TableQuery<DynamicTableEntity> scanQuery =
      new TableQuery<DynamicTableEntity>(DynamicTableEntity.class)
          .where(whereStr).take(recordcount);
  int cnt = 0;
  for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) {
    HashMap<String, EntityProperty> properties = entity.getProperties();
    HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>();
    for (Entry<String, EntityProperty> entry : properties.entrySet()) {
      String fieldName = entry.getKey();
      ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray());
      // Client-side projection: keep only the requested fields (all if fields == null).
      if (fields == null || fields.contains(fieldName)) {
        cur.put(fieldName, fieldVal);
      }
    }
    result.add(cur);
    // take(recordcount) caps the server response; this guards the client side too.
    if (++cnt == recordcount) {
      break;
    }
  }
  return Status.OK;
} catch (Exception e) {
  return Status.ERROR;
}
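The only storage-specific piece of scan is the OData filter it hands to TableQuery. Since that is plain string formatting, it can be checked without touching a storage account; a self-contained sketch (class name and values are hypothetical):

public final class ScanFilterSketch {
  public static void main(String[] args) {
    String partitionKey = "Test";  // PARTITIONKEY_DEFAULT
    String startkey = "user100";   // hypothetical start row
    String whereStr = String.format(
        "(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey);
    // Prints: (PartitionKey eq 'Test') and (RowKey ge 'user100')
    System.out.println(whereStr);
  }
}

Note that all YCSB rows share one partition key here, so every scan is served as a single-partition range read.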
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
update
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == 
batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
return insertOrUpdate(key, values);
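update is an unconditional upsert: it delegates to insertOrUpdate, which issues TableOperation.insertOrReplace, so a missing row is created rather than rejected. A sketch of the operation it ultimately builds, using the same legacy SDK calls as the source (UpsertSketch and the field value are hypothetical):

import java.util.HashMap;

import com.microsoft.azure.storage.table.DynamicTableEntity;
import com.microsoft.azure.storage.table.EntityProperty;
import com.microsoft.azure.storage.table.TableOperation;

public final class UpsertSketch {
  // Builds (but does not execute) the insert-or-replace a YCSB update turns into.
  static TableOperation upsert(String partitionKey, String rowKey, byte[] value) {
    HashMap<String, EntityProperty> props = new HashMap<String, EntityProperty>();
    props.put("field0", new EntityProperty(value));
    DynamicTableEntity entity = new DynamicTableEntity(partitionKey, rowKey, props);
    return TableOperation.insertOrReplace(entity);
  }
}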
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
insert
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); 
BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
if (batchSize == 1) {
  return insertOrUpdate(key, values);
} else {
  return insertBatch(key, values);
}
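When azure.batchsize > 1, inserts accumulate in the shared TableBatchOperation and only every batchSize-th call actually talks to the service. The cadence can be seen in isolation with a simulated flush (BatchFlushSketch is hypothetical; no SDK involved):

import java.util.ArrayList;
import java.util.List;

// Simulates the ++curIdx == batchSize flush cadence used by insertBatch().
public final class BatchFlushSketch {
  private static final List<String> BUFFER = new ArrayList<String>();
  private static int curIdx = 0;
  private static final int BATCH_SIZE = 3; // hypothetical azure.batchsize

  static void insert(String key) {
    BUFFER.add(key);
    if (++curIdx == BATCH_SIZE) {
      System.out.println("flush " + BUFFER); // stand-in for cloudTable.execute(...)
      BUFFER.clear();
      curIdx = 0;
    }
  }

  public static void main(String[] args) {
    for (int i = 0; i < 7; i++) {
      insert("user" + i);
    }
    // user6 is still buffered at this point -- mirroring how a YCSB run can
    // end with an unflushed partial batch, since cleanup() is a no-op.
  }
}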
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
delete
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) {<FILL_FUNCTION_BODY>} private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : 
values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
try {
  // First retrieve the entity: the delete below needs a concrete instance
  // (including its ETag) rather than just the key.
  TableOperation retrieveOp =
      TableOperation.retrieve(partitionKey, key, TableServiceEntity.class);
  TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType();
  // Then delete the retrieved entity.
  TableOperation deleteOp = TableOperation.delete(entity);
  cloudTable.execute(deleteOp);
  return Status.OK;
} catch (Exception e) {
  return Status.ERROR;
}
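Deletes cost two round trips because the legacy Table SDK deletes an entity instance, not a key: the retrieve supplies the instance and its ETag. A minimal sketch against the same API (DeleteSketch is hypothetical; the CloudTable must come from a real client):

import com.microsoft.azure.storage.table.CloudTable;
import com.microsoft.azure.storage.table.TableOperation;
import com.microsoft.azure.storage.table.TableServiceEntity;

public final class DeleteSketch {
  static void deleteRow(CloudTable table, String partitionKey, String rowKey) throws Exception {
    // Point lookup first; the returned entity carries the ETag the delete needs.
    TableOperation retrieve =
        TableOperation.retrieve(partitionKey, rowKey, TableServiceEntity.class);
    TableServiceEntity entity = table.execute(retrieve).getResultAsType();
    table.execute(TableOperation.delete(entity));
  }
}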
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
getStorageConnectionString
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) {<FILL_FUNCTION_BODY>} /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status 
insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s",
    protocol, account, key);
if (tableEndPoint != null) {
  res = String.format("%s;TableEndpoint=%s", res, tableEndPoint);
}
return res;
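Both connection-string shapes are easy to verify offline; a runnable one-off with obviously fake credentials (ConnStringSketch and all values are hypothetical):

public final class ConnStringSketch {
  static String build(String protocol, String account, String key, String tableEndPoint) {
    String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s",
        protocol, account, key);
    if (tableEndPoint != null) {
      res = String.format("%s;TableEndpoint=%s", res, tableEndPoint);
    }
    return res;
  }

  public static void main(String[] args) {
    // Default endpoint resolution:
    System.out.println(build("https", "myaccount", "ZmFrZWtleQ==", null));
    // Explicit table endpoint (e.g. an emulator or sovereign cloud):
    System.out.println(build("https", "myaccount", "ZmFrZWtleQ==",
        "https://myaccount.table.core.windows.net"));
  }
}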
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
readSubset
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
String whereStr = String.format("RowKey eq '%s'", key);
// Server-side projection: only the requested columns come back.
TableQuery<TableServiceEntity> projectionQuery = TableQuery.from(TableServiceEntity.class)
    .where(whereStr).select(fields.toArray(new String[0]));
EntityResolver<HashMap<String, ByteIterator>> resolver =
    new EntityResolver<HashMap<String, ByteIterator>>() {
      public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey,
          Date timeStamp, HashMap<String, EntityProperty> properties, String etag) {
        HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>();
        for (Entry<String, EntityProperty> entry : properties.entrySet()) {
          // Renamed from 'key'/'val' to avoid shadowing readSubset's 'key' parameter.
          String fieldName = entry.getKey();
          ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray());
          tmp.put(fieldName, fieldVal);
        }
        return tmp;
      }
    };
try {
  for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) {
    for (Entry<String, ByteIterator> entry : tmp.entrySet()) {
      result.put(entry.getKey(), entry.getValue());
    }
  }
  return Status.OK;
} catch (Exception e) {
  return Status.ERROR;
}
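The projection itself is just the select(...) column list derived from the YCSB field set; building the query needs no account, only the SDK on the classpath (ProjectionSketch and the field names are hypothetical):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import com.microsoft.azure.storage.table.TableQuery;
import com.microsoft.azure.storage.table.TableServiceEntity;

public final class ProjectionSketch {
  public static void main(String[] args) {
    Set<String> fields = new HashSet<String>(Arrays.asList("field0", "field3"));
    String[] projection = fields.toArray(new String[0]);
    // q is ready to pass to cloudTable.execute(q, resolver) against a live table.
    TableQuery<TableServiceEntity> q = TableQuery.from(TableServiceEntity.class)
        .where(String.format("RowKey eq '%s'", "user42"))
        .select(projection);
    System.out.println(Arrays.toString(projection)); // the columns sent to the service
  }
}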
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
resolve
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) {<FILL_FUNCTION_BODY>} }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, 
EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
// copy each projected EntityProperty into YCSB's field/ByteIterator shape HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp;
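For context, a minimal driver sketch of how this projection path is reached (the variable `client`, the key `user42`, and the field names are hypothetical, not from the source): a non-null field set makes read() delegate to readSubset(), whose resolver body is the answer shown above.

    Set<String> fields = new HashSet<>(Arrays.asList("field0", "field1")); // hypothetical YCSB field names
    Map<String, ByteIterator> result = new HashMap<>();
    Status s = client.read("usertable", "user42", fields, result); // non-null fields => readSubset() projection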
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
readEntity
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new 
DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
try { // retrieve the entity to be read TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; }
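One behavioral caveat in this body: when the key is absent, getResultAsType() returns null, so the entity.getProperties() call throws a NullPointerException and the catch block reports Status.ERROR rather than Status.NOT_FOUND. A hedged refinement sketch, not what the source does:

    DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType();
    if (entity == null) {
      return Status.NOT_FOUND; // distinguish a missing row from a service failure
    }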
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
insertBatch
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} private Status insertOrUpdate(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = 
new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; } } }
HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); // queue the entity on the shared batch BATCH_OPERATION.insertOrReplace(entity); if (++curIdx == batchSize) { // flush once batchSize operations have accumulated try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK;
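Note that BATCH_OPERATION and curIdx are static, shared by every DB instance (YCSB creates one per client thread), and mutated without synchronization, so concurrent inserters can interleave partial batches. A sketch of one way to serialize the accumulate-and-flush, offered as an alternative under that assumption rather than as the source's behavior:

    synchronized (BATCH_OPERATION) {
      BATCH_OPERATION.insertOrReplace(entity); // all entities share partitionKey, as a batch requires
      if (++curIdx == batchSize) { // flush a full batch; the Table service caps a batch at 100 ops, matching BATCHSIZE_UPPERBOUND
        cloudTable.execute(BATCH_OPERATION);
        BATCH_OPERATION.clear();
        curIdx = 0;
      }
    }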
brianfrankcooper_YCSB
YCSB/azuretablestorage/src/main/java/site/ycsb/db/azuretablestorage/AzureClient.java
AzureClient
insertOrUpdate
class AzureClient extends DB { public static final String PROTOCOL = "azure.protocal"; public static final String PROTOCOL_DEFAULT = "https"; public static final String TABLE_ENDPOINT = "azure.endpoint"; public static final String ACCOUNT = "azure.account"; public static final String KEY = "azure.key"; public static final String TABLE = "azure.table"; public static final String TABLE_DEFAULT = "usertable"; public static final String PARTITIONKEY = "azure.partitionkey"; public static final String PARTITIONKEY_DEFAULT = "Test"; public static final String BATCHSIZE = "azure.batchsize"; public static final String BATCHSIZE_DEFAULT = "1"; private static final int BATCHSIZE_UPPERBOUND = 100; private static final TableBatchOperation BATCH_OPERATION = new TableBatchOperation(); private static String partitionKey; private CloudStorageAccount storageAccount = null; private CloudTableClient tableClient = null; private CloudTable cloudTable = null; private static int batchSize; private static int curIdx = 0; @Override public void init() throws DBException { Properties props = getProperties(); String protocol = props.getProperty(PROTOCOL, PROTOCOL_DEFAULT); if (protocol != "https" && protocol != "http") { throw new DBException("Protocol must be 'http' or 'https'!\n"); } String table = props.getProperty(TABLE, TABLE_DEFAULT); partitionKey = props.getProperty(PARTITIONKEY, PARTITIONKEY_DEFAULT); batchSize = Integer.parseInt(props.getProperty(BATCHSIZE, BATCHSIZE_DEFAULT)); if (batchSize < 1 || batchSize > BATCHSIZE_UPPERBOUND) { throw new DBException(String.format("Batchsize must be between 1 and %d!\n", BATCHSIZE_UPPERBOUND)); } String account = props.getProperty(ACCOUNT); String key = props.getProperty(KEY); String tableEndPoint = props.getProperty(TABLE_ENDPOINT); String storageConnectionString = getStorageConnectionString(protocol, account, key, tableEndPoint); try { storageAccount = CloudStorageAccount.parse(storageConnectionString); } catch (Exception e) { throw new DBException("Could not connect to the account.\n", e); } tableClient = storageAccount.createCloudTableClient(); try { cloudTable = tableClient.getTableReference(table); cloudTable.createIfNotExists(); } catch (Exception e) { throw new DBException("Could not connect to the table.\n", e); } } @Override public void cleanup() { } @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (fields != null) { return readSubset(key, fields, result); } else { return readEntity(key, result); } } @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { String whereStr = String.format("(PartitionKey eq '%s') and (RowKey ge '%s')", partitionKey, startkey); TableQuery<DynamicTableEntity> scanQuery = new TableQuery<DynamicTableEntity>(DynamicTableEntity.class) .where(whereStr).take(recordcount); int cnt = 0; for (DynamicTableEntity entity : cloudTable.execute(scanQuery)) { HashMap<String, EntityProperty> properties = entity.getProperties(); HashMap<String, ByteIterator> cur = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); if (fields == null || fields.contains(fieldName)) { cur.put(fieldName, fieldVal); } } result.add(cur); if (++cnt == recordcount) { break; } } return Status.OK; } catch (Exception e) { return Status.ERROR; } 
} @Override public Status update(String table, String key, Map<String, ByteIterator> values) { return insertOrUpdate(key, values); } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { return insertBatch(key, values); } } @Override public Status delete(String table, String key) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, TableServiceEntity.class); TableServiceEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); // secondly, delete the entity TableOperation deleteOp = TableOperation.delete(entity); cloudTable.execute(deleteOp); return Status.OK; } catch (Exception e) { return Status.ERROR; } } private String getStorageConnectionString(String protocol, String account, String key, String tableEndPoint) { String res = String.format("DefaultEndpointsProtocol=%s;AccountName=%s;AccountKey=%s", protocol, account, key); if (tableEndPoint != null) { res = String.format("%s;TableEndpoint=%s", res, tableEndPoint); } return res; } /* * Read subset of properties instead of full fields with projection. */ public Status readSubset(String key, Set<String> fields, Map<String, ByteIterator> result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery<TableServiceEntity> projectionQuery = TableQuery.from( TableServiceEntity.class).where(whereStr).select(fields.toArray(new String[0])); EntityResolver<HashMap<String, ByteIterator>> resolver = new EntityResolver<HashMap<String, ByteIterator>>() { public HashMap<String, ByteIterator> resolve(String partitionkey, String rowKey, Date timeStamp, HashMap<String, EntityProperty> properties, String etag) { HashMap<String, ByteIterator> tmp = new HashMap<String, ByteIterator>(); for (Entry<String, EntityProperty> entry : properties.entrySet()) { String key = entry.getKey(); ByteIterator val = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); tmp.put(key, val); } return tmp; } }; try { for (HashMap<String, ByteIterator> tmp : cloudTable.execute(projectionQuery, resolver)) { for (Entry<String, ByteIterator> entry : tmp.entrySet()){ String fieldName = entry.getKey(); ByteIterator fieldVal = entry.getValue(); result.put(fieldName, fieldVal); } } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status readEntity(String key, Map<String, ByteIterator> result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = TableOperation.retrieve(partitionKey, key, DynamicTableEntity.class); DynamicTableEntity entity = cloudTable.execute(retrieveOp).getResultAsType(); HashMap<String, EntityProperty> properties = entity.getProperties(); for (Entry<String, EntityProperty> entry: properties.entrySet()) { String fieldName = entry.getKey(); ByteIterator fieldVal = new ByteArrayByteIterator(entry.getValue().getValueAsByteArray()); result.put(fieldName, fieldVal); } return Status.OK; } catch (Exception e) { return Status.ERROR; } } private Status insertBatch(String key, Map<String, ByteIterator> values) { HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); BATCH_OPERATION.insertOrReplace(entity); if 
(++curIdx == batchSize) { try { cloudTable.execute(BATCH_OPERATION); BATCH_OPERATION.clear(); curIdx = 0; } catch (Exception e) { return Status.ERROR; } } return Status.OK; } private Status insertOrUpdate(String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} }
HashMap<String, EntityProperty> properties = new HashMap<String, EntityProperty>(); for (Entry<String, ByteIterator> entry : values.entrySet()) { String fieldName = entry.getKey(); byte[] fieldVal = entry.getValue().toArray(); properties.put(fieldName, new EntityProperty(fieldVal)); } DynamicTableEntity entity = new DynamicTableEntity(partitionKey, key, properties); TableOperation insertOrReplace = TableOperation.insertOrReplace(entity); try { cloudTable.execute(insertOrReplace); return Status.OK; } catch (Exception e) { return Status.ERROR; }
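Two quirks in the shared AzureClient.init shown in the class bodies above. First, the property key is spelled "azure.protocal" in the source; it is kept as-is here, since renaming the key would break existing workload files. Second, the guard protocol != "https" && protocol != "http" compares references, not contents: only the interned default literal passes, while a textually valid value loaded from a properties file is a distinct String object and gets rejected. A corrected sketch of the guard using content equality:

    if (!"https".equals(protocol) && !"http".equals(protocol)) {
      throw new DBException("Protocol must be 'http' or 'https'!\n");
    }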
brianfrankcooper_YCSB
YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
CassandraCQLClient
init
class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException {<FILL_FUNCTION_BODY>} /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. 
*/ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { PreparedStatement stmt = (fields == null) ? 
scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. 
* @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR; } }
// Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimeoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimeoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimeoutMillis)); } String readTimeoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimeoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimeoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized
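For context, a sketch of the properties this init path consumes (the values are hypothetical; only hosts is required by the code above, everything else falls back to the defaults named in the class constants):

    Properties props = new Properties();
    props.setProperty("hosts", "10.0.0.1,10.0.0.2");            // required; comma-separated contact points
    props.setProperty("port", "9042");                          // optional, PORT_PROPERTY_DEFAULT
    props.setProperty("cassandra.keyspace", "ycsb");            // optional, KEYSPACE_PROPERTY_DEFAULT
    props.setProperty("cassandra.readconsistencylevel", "ONE"); // optional, defaults to QUORUM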
brianfrankcooper_YCSB
YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
CassandraCQLClient
cleanup
class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); } String readTimoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException {<FILL_FUNCTION_BODY>} /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. 
* @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. 
String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR; } }
synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. throw new DBException( String.format("initCount is negative: %d", curInitCount)); } }
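The init/cleanup pair above implements reference counting over the shared Cluster and Session: every thread's init() increments INIT_COUNT, and teardown happens only when the last cleanup() drives the count back to zero. A hypothetical driver sketch of that lifecycle (setProperties is the standard YCSB DB hook; the workload body is elided):

    CassandraCQLClient db = new CassandraCQLClient();
    db.setProperties(props); // e.g. the properties sketched above
    db.init();               // first caller builds the shared Cluster/Session
    try {
      // ... reads, scans, updates ...
    } finally {
      db.cleanup();          // last caller to finish closes the Cluster and Session
    }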
brianfrankcooper_YCSB
YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
CassandraCQLClient
read
class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); } String readTimoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. 
throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; } } /** * Update a record in the database. 
Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. 
* @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR; } }
try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; }
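Editor's note: the read body above prepares statements on demand and caches them per field set, resolving races with putIfAbsent so all threads converge on one PreparedStatement per projection. A JDK-only sketch of that cache shape (the generic type and method names are illustrative; the real binding stores driver PreparedStatement objects):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

public final class StatementCache<S> {
  private final ConcurrentMap<Set<String>, S> cache = new ConcurrentHashMap<>();

  public S get(Set<String> fields, Function<Set<String>, S> prepare) {
    S stmt = cache.get(fields);
    if (stmt == null) {
      S fresh = prepare.apply(fields);                 // several threads may prepare
      // Copy the key set, as the binding does, so later mutation of the
      // caller's set cannot corrupt the cache key.
      S prev = cache.putIfAbsent(new HashSet<>(fields), fresh);
      stmt = (prev != null) ? prev : fresh;            // keep whichever won the race
    }
    return stmt;
  }
}

The redundant preparation on a lost race is harmless here: preparing the same CQL twice is idempotent on the server side, so the binding trades a rare duplicate prepare for lock-free reads on the hot path.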
brianfrankcooper_YCSB
YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
CassandraCQLClient
scan
class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); } String readTimoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. 
throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. 
* @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR; } }
try { PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; }
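Editor's note: because the query builder cannot express token() predicates, the scan body above splices the WHERE clause together by hand. A sketch of the CQL string it ends up producing, with the driver's QueryBuilder.token(...) call replaced by the literal token() text; the table and key-column names shown match the binding's defaults but are assumptions in this snippet:

public final class TokenScanCql {
  public static String build(String table, String keyColumn) {
    return "SELECT * FROM " + table
        + " WHERE token(" + keyColumn + ") >= token(?)"
        + " LIMIT ?;";
  }

  public static void main(String[] args) {
    // Prints: SELECT * FROM usertable WHERE token(y_id) >= token(?) LIMIT ?;
    System.out.println(build("usertable", "y_id"));
  }
}

Rows come back in partitioner token order rather than key order, which is why the javadoc warns that token-based range scans "don't always yield intuitive results".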
brianfrankcooper_YCSB
YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
CassandraCQLClient
update
class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); } String readTimoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. 
throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. 
String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. * * @param table * The name of the table * @param key * The record key of the record to insert. 
* @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR; } }
try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR;
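Editor's note: the update body binds values by positional index, and the index layout differs from insert. UPDATE prepares "SET f0=?, f1=? WHERE y_id=?", so the key is the last bind variable; INSERT prepares "(y_id, f0, f1) VALUES (?, ?, ?)", so the key is first. A small JDK-only sketch of the two layouts (column names are illustrative):

public final class BindOrder {
  public static void main(String[] args) {
    String[] updateVars = {"field0", "field1", "y_id"}; // SET ... WHERE key last
    String[] insertVars = {"y_id", "field0", "field1"}; // (key, fields...) key first

    // update(): values occupy indices 0..n-2, the key goes at index n-1
    for (int i = 0; i < updateVars.length - 1; i++) {
      System.out.println("bind value at " + i + " -> " + updateVars[i]);
    }
    System.out.println("bind key at " + (updateVars.length - 1) + " -> y_id");

    // insert(): key at index 0, values at indices 1..n-1
    System.out.println("bind key at 0 -> " + insertVars[0]);
    for (int i = 1; i < insertVars.length; i++) {
      System.out.println("bind value at " + i + " -> " + insertVars[i]);
    }
  }
}

Swapping the two loops would silently bind the record key into a value column, so the asymmetric loop bounds in update() and insert() are deliberate.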
brianfrankcooper_YCSB
YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
CassandraCQLClient
insert
class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimoutMillis)); } String readTimoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. 
throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses "token" method for range scan which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not setup right for tokens. // So, we need to build it manually. 
String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) { try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR; } }
try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR;
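All four statement caches used above (readStmts, scanStmts, insertStmts, updateStmts) follow the same prepare-once pattern: look up a PreparedStatement by the requested field set, prepare one on a miss, and let putIfAbsent decide which racing thread's statement wins. Distilled into a standalone sketch (the class and parameter names here are illustrative, not part of YCSB):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Minimal sketch of the prepare-once pattern: statements are keyed by the
// requested field set; under a race, the first statement published wins.
final class StatementCache<S> {
  private final ConcurrentMap<Set<String>, S> cache = new ConcurrentHashMap<>();

  S get(Set<String> fields, Function<Set<String>, S> prepareFn) {
    S stmt = cache.get(fields);
    if (stmt == null) {
      S candidate = prepareFn.apply(fields);
      S previous = cache.putIfAbsent(fields, candidate);
      stmt = (previous != null) ? previous : candidate; // loser discards its copy
    }
    return stmt;
  }
}

Discarding the losing thread's statement without cleanup is harmless for CQL prepared statements. Note the real code above additionally copies fields into a new HashSet before caching, which guards against the workload reusing a mutable key set.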
brianfrankcooper_YCSB
YCSB/cassandra/src/main/java/site/ycsb/db/CassandraCQLClient.java
CassandraCQLClient
delete
delete
class CassandraCQLClient extends DB { private static Logger logger = LoggerFactory.getLogger(CassandraCQLClient.class); private static Cluster cluster = null; private static Session session = null; private static ConcurrentMap<Set<String>, PreparedStatement> readStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> scanStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> insertStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static ConcurrentMap<Set<String>, PreparedStatement> updateStmts = new ConcurrentHashMap<Set<String>, PreparedStatement>(); private static AtomicReference<PreparedStatement> readAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> scanAllStmt = new AtomicReference<PreparedStatement>(); private static AtomicReference<PreparedStatement> deleteStmt = new AtomicReference<PreparedStatement>(); private static ConsistencyLevel readConsistencyLevel = ConsistencyLevel.QUORUM; private static ConsistencyLevel writeConsistencyLevel = ConsistencyLevel.QUORUM; public static final String YCSB_KEY = "y_id"; public static final String KEYSPACE_PROPERTY = "cassandra.keyspace"; public static final String KEYSPACE_PROPERTY_DEFAULT = "ycsb"; public static final String USERNAME_PROPERTY = "cassandra.username"; public static final String PASSWORD_PROPERTY = "cassandra.password"; public static final String HOSTS_PROPERTY = "hosts"; public static final String PORT_PROPERTY = "port"; public static final String PORT_PROPERTY_DEFAULT = "9042"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY = "cassandra.readconsistencylevel"; public static final String READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = readConsistencyLevel.name(); public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY = "cassandra.writeconsistencylevel"; public static final String WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT = writeConsistencyLevel.name(); public static final String MAX_CONNECTIONS_PROPERTY = "cassandra.maxconnections"; public static final String CORE_CONNECTIONS_PROPERTY = "cassandra.coreconnections"; public static final String CONNECT_TIMEOUT_MILLIS_PROPERTY = "cassandra.connecttimeoutmillis"; public static final String READ_TIMEOUT_MILLIS_PROPERTY = "cassandra.readtimeoutmillis"; public static final String TRACING_PROPERTY = "cassandra.tracing"; public static final String TRACING_PROPERTY_DEFAULT = "false"; public static final String USE_SSL_CONNECTION = "cassandra.useSSL"; private static final String DEFAULT_USE_SSL_CONNECTION = "false"; /** * Count the number of times initialized to teardown on the last * {@link #cleanup()}. */ private static final AtomicInteger INIT_COUNT = new AtomicInteger(0); private static boolean debug = false; private static boolean trace = false; /** * Initialize any state for this DB. Called once per DB instance; there is one * DB instance per client thread. */ @Override public void init() throws DBException { // Keep track of number of calls to init (for later cleanup) INIT_COUNT.incrementAndGet(); // Synchronized so that we only have a single // cluster/session instance for all the threads. 
synchronized (INIT_COUNT) { // Check if the cluster has already been initialized if (cluster != null) { return; } try { debug = Boolean.parseBoolean(getProperties().getProperty("debug", "false")); trace = Boolean.valueOf(getProperties().getProperty(TRACING_PROPERTY, TRACING_PROPERTY_DEFAULT)); String host = getProperties().getProperty(HOSTS_PROPERTY); if (host == null) { throw new DBException(String.format( "Required property \"%s\" missing for CassandraCQLClient", HOSTS_PROPERTY)); } String[] hosts = host.split(","); String port = getProperties().getProperty(PORT_PROPERTY, PORT_PROPERTY_DEFAULT); String username = getProperties().getProperty(USERNAME_PROPERTY); String password = getProperties().getProperty(PASSWORD_PROPERTY); String keyspace = getProperties().getProperty(KEYSPACE_PROPERTY, KEYSPACE_PROPERTY_DEFAULT); readConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(READ_CONSISTENCY_LEVEL_PROPERTY, READ_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); writeConsistencyLevel = ConsistencyLevel.valueOf( getProperties().getProperty(WRITE_CONSISTENCY_LEVEL_PROPERTY, WRITE_CONSISTENCY_LEVEL_PROPERTY_DEFAULT)); Boolean useSSL = Boolean.parseBoolean(getProperties().getProperty(USE_SSL_CONNECTION, DEFAULT_USE_SSL_CONNECTION)); if ((username != null) && !username.isEmpty()) { Cluster.Builder clusterBuilder = Cluster.builder().withCredentials(username, password) .withPort(Integer.valueOf(port)).addContactPoints(hosts); if (useSSL) { clusterBuilder = clusterBuilder.withSSL(); } cluster = clusterBuilder.build(); } else { cluster = Cluster.builder().withPort(Integer.valueOf(port)) .addContactPoints(hosts).build(); } String maxConnections = getProperties().getProperty( MAX_CONNECTIONS_PROPERTY); if (maxConnections != null) { cluster.getConfiguration().getPoolingOptions() .setMaxConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(maxConnections)); } String coreConnections = getProperties().getProperty( CORE_CONNECTIONS_PROPERTY); if (coreConnections != null) { cluster.getConfiguration().getPoolingOptions() .setCoreConnectionsPerHost(HostDistance.LOCAL, Integer.valueOf(coreConnections)); } String connectTimeoutMillis = getProperties().getProperty( CONNECT_TIMEOUT_MILLIS_PROPERTY); if (connectTimeoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setConnectTimeoutMillis(Integer.valueOf(connectTimeoutMillis)); } String readTimeoutMillis = getProperties().getProperty( READ_TIMEOUT_MILLIS_PROPERTY); if (readTimeoutMillis != null) { cluster.getConfiguration().getSocketOptions() .setReadTimeoutMillis(Integer.valueOf(readTimeoutMillis)); } Metadata metadata = cluster.getMetadata(); logger.info("Connected to cluster: {}\n", metadata.getClusterName()); for (Host discoveredHost : metadata.getAllHosts()) { logger.info("Datacenter: {}; Host: {}; Rack: {}\n", discoveredHost.getDatacenter(), discoveredHost.getAddress(), discoveredHost.getRack()); } session = cluster.connect(keyspace); } catch (Exception e) { throw new DBException(e); } } // synchronized } /** * Clean up any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. 
throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } } /** * Read a record from the database. Each field/value pair from the result will * be stored in a HashMap. * * @param table * The name of the table * @param key * The record key of the record to read. * @param fields * The list of fields to read, or null for all of them * @param result * A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ @Override public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { try { PreparedStatement stmt = (fields == null) ? readAllStmt.get() : readStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } stmt = session.prepare(selectBuilder.from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())) .limit(1)); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? readAllStmt.getAndSet(stmt) : readStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); ResultSet rs = session.execute(stmt.bind(key)); if (rs.isExhausted()) { return Status.NOT_FOUND; } // Should be only 1 row Row row = rs.one(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { result.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { result.put(def.getName(), null); } } return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error reading key: {}", key).getMessage(), e); return Status.ERROR; } } /** * Perform a range scan for a set of records in the database. Each field/value * pair from the result will be stored in a HashMap. * * Cassandra CQL uses the "token" method for range scans, which doesn't always yield * intuitive results. * * @param table * The name of the table * @param startkey * The record key of the first record to read. * @param recordcount * The number of records to read * @param fields * The list of fields to read, or null for all of them * @param result * A Vector of HashMaps, where each HashMap is a set of field/value * pairs for one record * @return Zero on success, a non-zero error code on error */ @Override public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { try { PreparedStatement stmt = (fields == null) ? scanAllStmt.get() : scanStmts.get(fields); // Prepare statement on demand if (stmt == null) { Select.Builder selectBuilder; if (fields == null) { selectBuilder = QueryBuilder.select().all(); } else { selectBuilder = QueryBuilder.select(); for (String col : fields) { ((Select.Selection) selectBuilder).column(col); } } Select selectStmt = selectBuilder.from(table); // The statement builder is not set up correctly for tokens. // So, we need to build it manually. 
String initialStmt = selectStmt.toString(); StringBuilder scanStmt = new StringBuilder(); scanStmt.append(initialStmt.substring(0, initialStmt.length() - 1)); scanStmt.append(" WHERE "); scanStmt.append(QueryBuilder.token(YCSB_KEY)); scanStmt.append(" >= "); scanStmt.append("token("); scanStmt.append(QueryBuilder.bindMarker()); scanStmt.append(")"); scanStmt.append(" LIMIT "); scanStmt.append(QueryBuilder.bindMarker()); stmt = session.prepare(scanStmt.toString()); stmt.setConsistencyLevel(readConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = (fields == null) ? scanAllStmt.getAndSet(stmt) : scanStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("startKey = {}, recordcount = {}", startkey, recordcount); ResultSet rs = session.execute(stmt.bind(startkey, Integer.valueOf(recordcount))); HashMap<String, ByteIterator> tuple; while (!rs.isExhausted()) { Row row = rs.one(); tuple = new HashMap<String, ByteIterator>(); ColumnDefinitions cd = row.getColumnDefinitions(); for (ColumnDefinitions.Definition def : cd) { ByteBuffer val = row.getBytesUnsafe(def.getName()); if (val != null) { tuple.put(def.getName(), new ByteArrayByteIterator(val.array())); } else { tuple.put(def.getName(), null); } } result.add(tuple); } return Status.OK; } catch (Exception e) { logger.error( MessageFormatter.format("Error scanning with startkey: {}", startkey).getMessage(), e); return Status.ERROR; } } /** * Update a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key, overwriting any existing values with the same field name. * * @param table * The name of the table * @param key * The record key of the record to write. * @param values * A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ @Override public Status update(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = updateStmts.get(fields); // Prepare statement on demand if (stmt == null) { Update updateStmt = QueryBuilder.update(table); // Add fields for (String field : fields) { updateStmt.with(QueryBuilder.set(field, QueryBuilder.bindMarker())); } // Add key updateStmt.where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker())); stmt = session.prepare(updateStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = updateStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add fields ColumnDefinitions vars = stmt.getVariables(); BoundStatement boundStmt = stmt.bind(); for (int i = 0; i < vars.size() - 1; i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } // Add key boundStmt.setString(vars.size() - 1, key); session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error updating key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Insert a record in the database. Any field/value pairs in the specified * values HashMap will be written into the record with the specified record * key. 
* * @param table * The name of the table * @param key * The record key of the record to insert. * @param values * A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { try { Set<String> fields = values.keySet(); PreparedStatement stmt = insertStmts.get(fields); // Prepare statement on demand if (stmt == null) { Insert insertStmt = QueryBuilder.insertInto(table); // Add key insertStmt.value(YCSB_KEY, QueryBuilder.bindMarker()); // Add fields for (String field : fields) { insertStmt.value(field, QueryBuilder.bindMarker()); } stmt = session.prepare(insertStmt); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = insertStmts.putIfAbsent(new HashSet(fields), stmt); if (prevStmt != null) { stmt = prevStmt; } } if (logger.isDebugEnabled()) { logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { logger.debug("{} = {}", entry.getKey(), entry.getValue()); } } // Add key BoundStatement boundStmt = stmt.bind().setString(0, key); // Add fields ColumnDefinitions vars = stmt.getVariables(); for (int i = 1; i < vars.size(); i++) { boundStmt.setString(i, values.get(vars.getName(i)).toString()); } session.execute(boundStmt); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error inserting key: {}", key).getMessage(), e); } return Status.ERROR; } /** * Delete a record from the database. * * @param table * The name of the table * @param key * The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ @Override public Status delete(String table, String key) {<FILL_FUNCTION_BODY>} }
try { PreparedStatement stmt = deleteStmt.get(); // Prepare statement on demand if (stmt == null) { stmt = session.prepare(QueryBuilder.delete().from(table) .where(QueryBuilder.eq(YCSB_KEY, QueryBuilder.bindMarker()))); stmt.setConsistencyLevel(writeConsistencyLevel); if (trace) { stmt.enableTracing(); } PreparedStatement prevStmt = deleteStmt.getAndSet(stmt); if (prevStmt != null) { stmt = prevStmt; } } logger.debug(stmt.getQueryString()); logger.debug("key = {}", key); session.execute(stmt.bind(key)); return Status.OK; } catch (Exception e) { logger.error(MessageFormatter.format("Error deleting key: {}", key).getMessage(), e); } return Status.ERROR;
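The no-field-set statements (readAllStmt, scanAllStmt, and the deleteStmt used here) are cached with an AtomicReference rather than a map. A standalone sketch of that variant (names illustrative, not from YCSB):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Minimal sketch of the AtomicReference variant: on a race, getAndSet stores
// the newer statement but the racing thread keeps using the older one,
// mirroring the prevStmt handling in the method above.
final class SingleStatementCache<S> {
  private final AtomicReference<S> ref = new AtomicReference<>();

  S get(Supplier<S> prepare) {
    S stmt = ref.get();
    if (stmt == null) {
      stmt = prepare.get();
      S prev = ref.getAndSet(stmt);
      if (prev != null) {
        stmt = prev; // an earlier thread already prepared one; use it
      }
    }
    return stmt;
  }
}

Two equivalent prepared statements can briefly coexist under this scheme, which is benign: both were prepared against the same session and describe the same CQL.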
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
constructStandardQueriesAndFields
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) {<FILL_FUNCTION_BODY>
String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); }
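For reference, a worked example of the strings this constructs, assuming the CoreWorkload defaults (table usertable, field prefix field, fieldcount 10; these defaults are an assumption, since they are not restated in this excerpt): standardQuery becomes "SELECT * FROM usertable WHERE id=@key", standardScan becomes "SELECT * FROM usertable WHERE id>=@startKey LIMIT @count", and STANDARD_FIELDS becomes [field0, field1, ..., field9]. The @key, @startKey, and @count placeholders are Cloud Spanner named query parameters, bound per request by the read and scan paths below.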
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
getSpanner
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) {<FILL_FUNCTION_BODY>
if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner;
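getSpanner sizes the session pool's minimum to the YCSB thread count so each client thread can hold a session without pool growth during the run. A hedged configuration sketch, with illustrative values (the property keys are the ones declared in CloudSpannerProperties above, plus YCSB's threadcount):

import java.util.Properties;

// Illustrative values only: "my-project" and the counts are hypothetical.
public final class SpannerPropsSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("threadcount", "8");                   // min sessions becomes 8
    props.setProperty("cloudspanner.channels", "4");         // optional gRPC channel count
    props.setProperty("cloudspanner.project", "my-project"); // optional; often inferred
    // getSpanner(props, null, "my-project") would then build one process-wide
    // Spanner client with these settings and register the shutdown hook below.
  }
}

Setting the write-session fraction to 0 is sound here because this client never opens read-write transactions; inserts and updates go through mutations instead.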
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
run
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() {<FILL_FUNCTION_BODY>
spanner.close();
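The hook body above is the whole cleanup path for the process-wide client. As a self-contained illustration of the idiom (not YCSB code), note that a shutdown hook runs on normal JVM exit but not on SIGKILL, so it is best-effort cleanup rather than a guarantee:

// Standalone sketch of the shutdown-hook idiom used for spanner.close().
public final class ShutdownHookSketch {
  public static void main(String[] args) {
    Runtime.getRuntime().addShutdownHook(new Thread("cleanup") {
      @Override
      public void run() {
        System.out.println("releasing process-wide resources");
      }
    });
    System.out.println("work happens here");
  }
}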
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
init
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException {<FILL_FUNCTION_BODY>
synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); }
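A worked example of the staleness mapping in init() (property values illustrative): cloudspanner.boundedstaleness=0, the default, yields TimestampBound.strong(), so every read observes all previously committed data; cloudspanner.boundedstaleness=10 yields TimestampBound.ofMaxStaleness(10, TimeUnit.SECONDS), allowing Spanner to serve data up to ten seconds old, typically from a nearer replica, trading freshness for latency.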
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
readUsingQuery
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>
Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; }
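readUsingQuery delegates row-to-map conversion to decodeStruct, which this excerpt does not include. A plausible sketch of such a decoder (an assumption, not the file's actual code): both ResultSet and Struct implement StructReader, so one helper can serve the query and key-read paths:

import java.util.Map;
import com.google.cloud.spanner.StructReader;
import site.ycsb.ByteIterator;
import site.ycsb.StringByteIterator;

final class DecodeSketch {
  // Hypothetical decoder, NOT the file's decodeStruct(): assumes every value
  // column is a Spanner STRING, matching the string bindings this client
  // uses when writing.
  static void decodeStructSketch(Iterable<String> columns, StructReader reader,
      Map<String, ByteIterator> result) {
    for (String col : columns) {
      result.put(col, new StringByteIterator(reader.getString(col)));
    }
  }
}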
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
read
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>
if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; }
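To make the mode switch concrete (key value illustrative): with cloudspanner.readmode=query, a read for key "user42" executes the parameterized SELECT prepared in readUsingQuery(); with cloudspanner.readmode=read it calls dbClient.singleUse(timestampBound).readRow(table, Key.of("user42"), columns), fetching by primary key and bypassing SQL parsing and planning entirely.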
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
scanUsingQuery
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>
Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields;
    Statement query;
    if (fields == null || fields.size() == fieldCount) {
      query = Statement.newBuilder(standardScan)
          .bind("startKey").to(startKey)
          .bind("count").to(recordCount)
          .build();
    } else {
      Joiner joiner = Joiner.on(',');
      query = Statement.newBuilder("SELECT ")
          .append(joiner.join(fields))
          .append(" FROM ")
          .append(table)
          .append(" WHERE id>=@startKey LIMIT @count")
          .bind("startKey").to(startKey)
          .bind("count").to(recordCount)
          .build();
    }
    try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) {
      while (resultSet.next()) {
        HashMap<String, ByteIterator> row = new HashMap<>();
        decodeStruct(columns, resultSet, row);
        result.add(row);
      }
      return Status.OK;
    } catch (Exception e) {
      LOGGER.log(Level.INFO, "scanUsingQuery()", e);
      return Status.ERROR;
    }
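As a side note on the parameter binding used in scanUsingQuery(), the sketch below builds the same kind of range-scan statement in isolation. The table and column names are assumptions chosen for illustration.

import com.google.cloud.spanner.Statement;

public final class ScanStatementSketch {
  // Values are bound as named parameters; only identifiers (table, columns)
  // are inlined, since Spanner SQL cannot parameterize identifiers.
  static Statement rangeScan(String startKey, int count) {
    return Statement.newBuilder(
            "SELECT id, field0 FROM usertable WHERE id>=@startKey LIMIT @count")
        .bind("startKey").to(startKey)
        .bind("count").to((long) count) // LIMIT parameters are INT64
        .build();
  }
}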
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerClient
scan
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; Statement query; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id>=@startKey LIMIT @count") .bind("startKey").to(startKey) .bind("count").to(recordCount) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scanUsingQuery()", e); return Status.ERROR; } } @Override public Status scan( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>
if (queriesForReads) {
      return scanUsingQuery(table, startKey, recordCount, fields, result);
    }
    Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields;
    // An empty Key.of() as the range end extends the scan to the end of the
    // table; Options.limit() caps the number of rows actually returned.
    KeySet keySet = KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build();
    try (ResultSet resultSet = dbClient.singleUse(timestampBound)
        .read(table, keySet, columns, Options.limit(recordCount))) {
      while (resultSet.next()) {
        HashMap<String, ByteIterator> row = new HashMap<>();
        decodeStruct(columns, resultSet, row);
        result.add(row);
      }
      return Status.OK;
    } catch (Exception e) {
      LOGGER.log(Level.INFO, "scan()", e);
      return Status.ERROR;
    }
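The key-range variant above can also be exercised on its own. A minimal sketch, assuming a usertable with an id primary key and a field0 column:

import com.google.cloud.spanner.Key;
import com.google.cloud.spanner.KeyRange;
import com.google.cloud.spanner.KeySet;
import com.google.cloud.spanner.Options;
import com.google.cloud.spanner.ReadContext;
import com.google.cloud.spanner.ResultSet;

import java.util.Arrays;

public final class KeyRangeScanSketch {
  // Key.of() with no parts acts as an open end key, so the range runs from
  // startKey to the end of the table; Options.limit() caps the row count.
  static void scanFrom(ReadContext ctx, String startKey, int limit) {
    KeySet keys = KeySet.newBuilder()
        .addRange(KeyRange.closedClosed(Key.of(startKey), Key.of()))
        .build();
    try (ResultSet rs = ctx.read("usertable", keys,
        Arrays.asList("id", "field0"), Options.limit(limit))) {
      while (rs.next()) {
        System.out.println(rs.getString("id"));
      }
    }
  }
}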
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerClient
update
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; Statement query; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id>=@startKey LIMIT @count") .bind("startKey").to(startKey) .bind("count").to(recordCount) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scanUsingQuery()", e); return Status.ERROR; } } @Override public Status scan( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { if (queriesForReads) { return scanUsingQuery(table, startKey, recordCount, fields, result); } Iterable<String> columns = fields == null ? 
STANDARD_FIELDS : fields; KeySet keySet = KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build(); try (ResultSet resultSet = dbClient.singleUse(timestampBound) .read(table, keySet, columns, Options.limit(recordCount))) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scan()", e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>
Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table);
    m.set(PRIMARY_KEY_COLUMN).to(key);
    for (Map.Entry<String, ByteIterator> e : values.entrySet()) {
      m.set(e.getKey()).to(e.getValue().toString());
    }
    try {
      dbClient.writeAtLeastOnce(Arrays.asList(m.build()));
    } catch (Exception e) {
      LOGGER.log(Level.INFO, "update()", e);
      return Status.ERROR;
    }
    return Status.OK;
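For reference, a sketch of the same upsert pattern outside the YCSB harness; the table and column names are placeholders.

import com.google.cloud.spanner.DatabaseClient;
import com.google.cloud.spanner.Mutation;

import java.util.Collections;

public final class UpsertSketch {
  // writeAtLeastOnce() skips replay protection, so the write may be applied
  // more than once on retry -- acceptable for blind upserts like this one.
  static void upsert(DatabaseClient db, String key, String value) {
    Mutation m = Mutation.newInsertOrUpdateBuilder("usertable")
        .set("id").to(key)
        .set("field0").to(value)
        .build();
    db.writeAtLeastOnce(Collections.singletonList(m));
  }
}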
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerProperties
insert
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; Statement query; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id>=@startKey LIMIT @count") .bind("startKey").to(startKey) .bind("count").to(recordCount) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scanUsingQuery()", e); return Status.ERROR; } } @Override public Status scan( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { if (queriesForReads) { return scanUsingQuery(table, startKey, recordCount, fields, result); } Iterable<String> columns = fields == null ? 
STANDARD_FIELDS : fields; KeySet keySet = KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build(); try (ResultSet resultSet = dbClient.singleUse(timestampBound) .read(table, keySet, columns, Options.limit(recordCount))) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scan()", e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry<String, ByteIterator> e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } try { dbClient.writeAtLeastOnce(Arrays.asList(m.build())); } catch (Exception e) { LOGGER.log(Level.INFO, "update()", e); return Status.ERROR; } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>
// Buffer the mutation unless the batch is already full (which can only
    // happen if a previous flush failed and the operation is being retried).
    if (bufferedMutations.size() < batchInserts) {
      Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table);
      m.set(PRIMARY_KEY_COLUMN).to(key);
      for (Map.Entry<String, ByteIterator> e : values.entrySet()) {
        m.set(e.getKey()).to(e.getValue().toString());
      }
      bufferedMutations.add(m.build());
    } else {
      LOGGER.log(Level.INFO, "Limit of cached mutations reached. The given mutation with key "
          + key + " is ignored. Is this a retry?");
    }
    // Keep buffering until a full batch is assembled, then flush it in one RPC.
    if (bufferedMutations.size() < batchInserts) {
      return Status.BATCHED_OK;
    }
    try {
      dbClient.writeAtLeastOnce(bufferedMutations);
      bufferedMutations.clear();
    } catch (Exception e) {
      LOGGER.log(Level.INFO, "insert()", e);
      return Status.ERROR;
    }
    return Status.OK;
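The buffering scheme above generalizes to a small reusable helper. A sketch, assuming the caller invokes flush() before shutdown (the class and method names here are hypothetical):

import com.google.cloud.spanner.DatabaseClient;
import com.google.cloud.spanner.Mutation;

import java.util.ArrayList;
import java.util.List;

public final class MutationBuffer {
  private final List<Mutation> buffer = new ArrayList<>();
  private final DatabaseClient db;
  private final int batchSize;

  MutationBuffer(DatabaseClient db, int batchSize) {
    this.db = db;
    this.batchSize = batchSize;
  }

  // Collect mutations client-side and ship a full batch in one RPC,
  // the same idea insert() uses to amortize per-request overhead.
  void add(Mutation m) {
    buffer.add(m);
    if (buffer.size() >= batchSize) {
      flush();
    }
  }

  void flush() {
    if (!buffer.isEmpty()) {
      db.writeAtLeastOnce(buffer);
      buffer.clear();
    }
  }
}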
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerClient
cleanup
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; Statement query; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id>=@startKey LIMIT @count") .bind("startKey").to(startKey) .bind("count").to(recordCount) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scanUsingQuery()", e); return Status.ERROR; } } @Override public Status scan( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { if (queriesForReads) { return scanUsingQuery(table, startKey, recordCount, fields, result); } Iterable<String> columns = fields == null ? 
STANDARD_FIELDS : fields; KeySet keySet = KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build(); try (ResultSet resultSet = dbClient.singleUse(timestampBound) .read(table, keySet, columns, Options.limit(recordCount))) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scan()", e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry<String, ByteIterator> e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } try { dbClient.writeAtLeastOnce(Arrays.asList(m.build())); } catch (Exception e) { LOGGER.log(Level.INFO, "update()", e); return Status.ERROR; } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (bufferedMutations.size() < batchInserts) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry<String, ByteIterator> e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } bufferedMutations.add(m.build()); } else { LOGGER.log(Level.INFO, "Limit of cached mutations reached. The given mutation with key " + key + " is ignored. Is this a retry?"); } if (bufferedMutations.size() < batchInserts) { return Status.BATCHED_OK; } try { dbClient.writeAtLeastOnce(bufferedMutations); bufferedMutations.clear(); } catch (Exception e) { LOGGER.log(Level.INFO, "insert()", e); return Status.ERROR; } return Status.OK; } @Override public void cleanup() {<FILL_FUNCTION_BODY>
try {
      // Flush any mutations still buffered from a partially filled batch.
      if (bufferedMutations.size() > 0) {
        dbClient.writeAtLeastOnce(bufferedMutations);
        bufferedMutations.clear();
      }
    } catch (Exception e) {
      LOGGER.log(Level.INFO, "cleanup()", e);
    }
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerClient
delete
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; Statement query; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id>=@startKey LIMIT @count") .bind("startKey").to(startKey) .bind("count").to(recordCount) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scanUsingQuery()", e); return Status.ERROR; } } @Override public Status scan( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { if (queriesForReads) { return scanUsingQuery(table, startKey, recordCount, fields, result); } Iterable<String> columns = fields == null ? 
STANDARD_FIELDS : fields; KeySet keySet = KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build(); try (ResultSet resultSet = dbClient.singleUse(timestampBound) .read(table, keySet, columns, Options.limit(recordCount))) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scan()", e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry<String, ByteIterator> e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } try { dbClient.writeAtLeastOnce(Arrays.asList(m.build())); } catch (Exception e) { LOGGER.log(Level.INFO, "update()", e); return Status.ERROR; } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (bufferedMutations.size() < batchInserts) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry<String, ByteIterator> e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } bufferedMutations.add(m.build()); } else { LOGGER.log(Level.INFO, "Limit of cached mutations reached. The given mutation with key " + key + " is ignored. Is this a retry?"); } if (bufferedMutations.size() < batchInserts) { return Status.BATCHED_OK; } try { dbClient.writeAtLeastOnce(bufferedMutations); bufferedMutations.clear(); } catch (Exception e) { LOGGER.log(Level.INFO, "insert()", e); return Status.ERROR; } return Status.OK; } @Override public void cleanup() { try { if (bufferedMutations.size() > 0) { dbClient.writeAtLeastOnce(bufferedMutations); bufferedMutations.clear(); } } catch (Exception e) { LOGGER.log(Level.INFO, "cleanup()", e); } } @Override public Status delete(String table, String key) {<FILL_FUNCTION_BODY>
try { dbClient.writeAtLeastOnce(Arrays.asList(Mutation.delete(table, Key.of(key)))); } catch (Exception e) { LOGGER.log(Level.INFO, "delete()", e); return Status.ERROR; } return Status.OK;
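A minimal usage sketch of the delete path above, assuming a reachable Cloud Spanner instance (or emulator) with credentials configured and the YCSB core on the classpath; the instance, database, table, and key names are illustrative placeholders, not values from the source.

import java.util.Properties;

import site.ycsb.DB;
import site.ycsb.Status;
import site.ycsb.db.cloudspanner.CloudSpannerClient;

public final class DeleteSketch {
  public static void main(String[] args) throws Exception {
    DB db = new CloudSpannerClient();
    Properties p = new Properties();
    p.setProperty("cloudspanner.instance", "ycsb-instance"); // placeholder
    p.setProperty("cloudspanner.database", "ycsb-database"); // placeholder
    db.setProperties(p);
    db.init();                                  // builds the shared DatabaseClient
    Status s = db.delete("usertable", "user1"); // one delete mutation, written at-least-once
    System.out.println("delete -> " + s.getName());
    db.cleanup();                               // flushes any buffered inserts
  }
}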
brianfrankcooper_YCSB
YCSB/cloudspanner/src/main/java/site/ycsb/db/cloudspanner/CloudSpannerClient.java
CloudSpannerClient
decodeStruct
class CloudSpannerProperties { private CloudSpannerProperties() {} /** * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. */ static final String DATABASE = "cloudspanner.database"; /** * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. */ static final String INSTANCE = "cloudspanner.instance"; /** * Choose between 'read' and 'query'. Affects both read() and scan() operations. */ static final String READ_MODE = "cloudspanner.readmode"; /** * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching * is done. Recommended value during data load is 1000. */ static final String BATCH_INSERTS = "cloudspanner.batchinserts"; /** * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). * For performance gains, this should be set to 10 seconds. */ static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; // The properties below usually do not need to be set explicitly. /** * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly * necessary and can often be inferred from the environment. */ static final String PROJECT = "cloudspanner.project"; /** * The Cloud Spanner host name to use in the YCSB run. */ static final String HOST = "cloudspanner.host"; /** * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. */ static final String NUM_CHANNELS = "cloudspanner.channels"; } private static int fieldCount; private static boolean queriesForReads; private static int batchInserts; private static TimestampBound timestampBound; private static String standardQuery; private static String standardScan; private static final ArrayList<String> STANDARD_FIELDS = new ArrayList<>(); private static final String PRIMARY_KEY_COLUMN = "id"; private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); // Static lock for the class. private static final Object CLASS_LOCK = new Object(); // Single Spanner client per process. private static Spanner spanner = null; // Single database client per process. private static DatabaseClient dbClient = null; // Buffered mutations on a per object/thread basis for batch inserts. // Note that we have a separate CloudSpannerClient object per thread. 
private final ArrayList<Mutation> bufferedMutations = new ArrayList<>(); private static void constructStandardQueriesAndFields(Properties properties) { String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); final String fieldprefix = properties.getProperty(CoreWorkload.FIELD_NAME_PREFIX, CoreWorkload.FIELD_NAME_PREFIX_DEFAULT); standardQuery = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); standardScan = new StringBuilder() .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); for (int i = 0; i < fieldCount; i++) { STANDARD_FIELDS.add(fieldprefix + i); } } private static Spanner getSpanner(Properties properties, String host, String project) { if (spanner != null) { return spanner; } String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() .setSessionPoolOption(SessionPoolOptions.newBuilder() .setMinSessions(numThreads) // Since we have no read-write transactions, we can set the write session fraction to 0. .setWriteSessionsFraction(0) .build()); if (host != null) { optionsBuilder.setHost(host); } if (project != null) { optionsBuilder.setProjectId(project); } if (numChannels != null) { optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); } spanner = optionsBuilder.build().getService(); Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { spanner.close(); } }); return spanner; } @Override public void init() throws DBException { synchronized (CLASS_LOCK) { if (dbClient != null) { return; } Properties properties = getProperties(); String host = properties.getProperty(CloudSpannerProperties.HOST); String project = properties.getProperty(CloudSpannerProperties.PROJECT); String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); fieldCount = Integer.parseInt(properties.getProperty( CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); constructStandardQueriesAndFields(properties); int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( CloudSpannerProperties.BOUNDED_STALENESS, "0")); timestampBound = (boundedStalenessSeconds <= 0) ? 
TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); try { spanner = getSpanner(properties, host, project); if (project == null) { project = spanner.getOptions().getProjectId(); } dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); } catch (Exception e) { LOGGER.log(Level.SEVERE, "init()", e); throw new DBException(e); } LOGGER.log(Level.INFO, new StringBuilder() .append("\nHost: ").append(spanner.getOptions().getHost()) .append("\nProject: ").append(project) .append("\nInstance: ").append(instance) .append("\nDatabase: ").append(database) .append("\nUsing queries for reads: ").append(queriesForReads) .append("\nBatching inserts: ").append(batchInserts) .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) .toString()); } } private Status readUsingQuery( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { Statement query; Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id=@key") .bind("key").to(key) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { resultSet.next(); decodeStruct(columns, resultSet, result); if (resultSet.next()) { throw new Exception("Expected exactly one row for each read."); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "readUsingQuery()", e); return Status.ERROR; } } @Override public Status read( String table, String key, Set<String> fields, Map<String, ByteIterator> result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; try { Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); decodeStruct(columns, row, result); return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "read()", e); return Status.ERROR; } } private Status scanUsingQuery( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { Iterable<String> columns = fields == null ? STANDARD_FIELDS : fields; Statement query; if (fields == null || fields.size() == fieldCount) { query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); } else { Joiner joiner = Joiner.on(','); query = Statement.newBuilder("SELECT ") .append(joiner.join(fields)) .append(" FROM ") .append(table) .append(" WHERE id>=@startKey LIMIT @count") .bind("startKey").to(startKey) .bind("count").to(recordCount) .build(); } try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scanUsingQuery()", e); return Status.ERROR; } } @Override public Status scan( String table, String startKey, int recordCount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { if (queriesForReads) { return scanUsingQuery(table, startKey, recordCount, fields, result); } Iterable<String> columns = fields == null ? 
STANDARD_FIELDS : fields; KeySet keySet = KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build(); try (ResultSet resultSet = dbClient.singleUse(timestampBound) .read(table, keySet, columns, Options.limit(recordCount))) { while (resultSet.next()) { HashMap<String, ByteIterator> row = new HashMap<>(); decodeStruct(columns, resultSet, row); result.add(row); } return Status.OK; } catch (Exception e) { LOGGER.log(Level.INFO, "scan()", e); return Status.ERROR; } } @Override public Status update(String table, String key, Map<String, ByteIterator> values) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry<String, ByteIterator> e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } try { dbClient.writeAtLeastOnce(Arrays.asList(m.build())); } catch (Exception e) { LOGGER.log(Level.INFO, "update()", e); return Status.ERROR; } return Status.OK; } @Override public Status insert(String table, String key, Map<String, ByteIterator> values) { if (bufferedMutations.size() < batchInserts) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry<String, ByteIterator> e : values.entrySet()) { m.set(e.getKey()).to(e.getValue().toString()); } bufferedMutations.add(m.build()); } else { LOGGER.log(Level.INFO, "Limit of cached mutations reached. The given mutation with key " + key + " is ignored. Is this a retry?"); } if (bufferedMutations.size() < batchInserts) { return Status.BATCHED_OK; } try { dbClient.writeAtLeastOnce(bufferedMutations); bufferedMutations.clear(); } catch (Exception e) { LOGGER.log(Level.INFO, "insert()", e); return Status.ERROR; } return Status.OK; } @Override public void cleanup() { try { if (bufferedMutations.size() > 0) { dbClient.writeAtLeastOnce(bufferedMutations); bufferedMutations.clear(); } } catch (Exception e) { LOGGER.log(Level.INFO, "cleanup()", e); } } @Override public Status delete(String table, String key) { try { dbClient.writeAtLeastOnce(Arrays.asList(Mutation.delete(table, Key.of(key)))); } catch (Exception e) { LOGGER.log(Level.INFO, "delete()", e); return Status.ERROR; } return Status.OK; } private static void decodeStruct( Iterable<String> columns, StructReader structReader, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>
for (String col : columns) { result.put(col, new StringByteIterator(structReader.getString(col))); }
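decodeStruct above reads each requested column back as a Spanner STRING and wraps it in a StringByteIterator. A self-contained sketch of that behavior against an in-memory Struct; the column names and values are illustrative.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import com.google.cloud.spanner.Struct;

import site.ycsb.ByteIterator;
import site.ycsb.StringByteIterator;

public final class DecodeStructSketch {
  public static void main(String[] args) {
    // Struct implements StructReader, so it stands in for a query/read row here.
    Struct row = Struct.newBuilder()
        .set("field0").to("alpha")
        .set("field1").to("beta")
        .build();
    Map<String, ByteIterator> result = new HashMap<>();
    for (String col : Arrays.asList("field0", "field1")) {
      result.put(col, new StringByteIterator(row.getString(col)));
    }
    System.out.println(result.keySet()); // [field0, field1]
  }
}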
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
delay
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() {<FILL_FUNCTION_BODY>} /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. 
* * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. */ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); }
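When randomization is on, the body above draws a uniform delay in [0, todelay) milliseconds, converts it to nanoseconds, and parks in a loop until the deadline passes, re-parking after spurious wakeups and bailing out on interrupt. A stripped-down, runnable sketch of that deadline loop; the 50 ms bound is arbitrary.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;

public final class ParkDeadlineSketch {
  public static void main(String[] args) {
    final long start = System.nanoTime();
    final long deadline = start + TimeUnit.MILLISECONDS.toNanos(50); // arbitrary bound
    do {
      // parkNanos may return early (spurious wakeup), hence the deadline re-check.
      LockSupport.parkNanos(deadline - System.nanoTime());
    } while (System.nanoTime() < deadline && !Thread.interrupted());
    System.out.printf("parked ~%d ms%n",
        TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start));
  }
}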
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
init
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() {<FILL_FUNCTION_BODY>} protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. 
* @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. */ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; }
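A hedged sketch of driving the init() above through the documented basicdb.* properties; the property values and the table/key names are illustrative. With counting on and verbosity off, the read only bumps a per-hash counter, and cleanup() reports the unique-key tallies.

import java.util.Properties;

import site.ycsb.BasicDB;

public final class BasicDbInitSketch {
  public static void main(String[] args) {
    BasicDB db = new BasicDB();
    Properties p = new Properties();
    p.setProperty("basicdb.verbose", "false");
    p.setProperty("basicdb.simulatedelay", "10");    // delay upper bound in ms
    p.setProperty("basicdb.randomizedelay", "true");
    p.setProperty("basicdb.count", "true");
    db.setProperties(p);
    db.init();
    db.read("usertable", "user1", null, null); // counted, not printed
    db.cleanup(); // prints [READS], Uniques, 1 (plus the other op tallies)
  }
}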
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
initialValue
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() {<FILL_FUNCTION_BODY>} }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. 
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. 
*/ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
return new StringBuilder();
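On Java 8 and later, the anonymous ThreadLocal subclass above has a more compact equivalent via the factory method; a sketch of the equivalent declaration plus the reuse idiom (behavior is identical).

public final class ThreadLocalBuilderSketch {
  // Equivalent to overriding initialValue() in an anonymous subclass.
  static final ThreadLocal<StringBuilder> TL_STRING_BUILDER =
      ThreadLocal.withInitial(StringBuilder::new);

  public static void main(String[] args) {
    StringBuilder sb = TL_STRING_BUILDER.get();
    sb.setLength(0); // clear before reuse, as getStringBuilder() does
    sb.append("READ usertable user1");
    System.out.println(sb);
  }
}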
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
getStringBuilder
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() {<FILL_FUNCTION_BODY>} /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. 
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. 
*/ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb;
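getStringBuilder() hands each calling thread its own builder, cleared via setLength(0) rather than reallocated, so the hot logging paths avoid per-call garbage and threads never share state. A small demonstration that two threads see distinct instances; the identity hash codes are expected to differ.

public final class PerThreadBuilderSketch {
  static final ThreadLocal<StringBuilder> TL =
      ThreadLocal.withInitial(StringBuilder::new);

  static StringBuilder get() {
    StringBuilder sb = TL.get();
    sb.setLength(0); // reset, never reallocate
    return sb;
  }

  public static void main(String[] args) throws InterruptedException {
    Runnable task = () -> System.out.println(Thread.currentThread().getName()
        + " -> builder@" + System.identityHashCode(get()));
    Thread t1 = new Thread(task, "t1");
    Thread t2 = new Thread(task, "t2");
    t1.start();
    t2.start();
    t1.join();
    t2.join();
  }
}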
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
read
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. * @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) {<FILL_FUNCTION_BODY>} /** * Perform a range scan for a set of records in the database. 
Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. */ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
delay();
if (verbose) {
  // Echo the operation, key, and requested fields to stdout.
  StringBuilder sb = getStringBuilder();
  sb.append("READ ").append(table).append(" ").append(key).append(" [ ");
  if (fields != null) {
    for (String f : fields) {
      sb.append(f).append(" ");
    }
  } else {
    sb.append("<all fields>");
  }
  sb.append("]");
  System.out.println(sb);
}
if (count) {
  // Record this (table, key, fields) combination for the uniques report.
  incCounter(reads, hash(table, key, fields));
}
return Status.OK;
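Note: the commented-out main() embedded in the class above declares fields as HashMap<String, String> but fills it with StringByteIterator values, so it would not compile as written. A corrected smoke test, offered only as a sketch (the class name BasicDBSmokeTest is hypothetical, and it must sit in package site.ycsb because BasicDB is package-private):

package site.ycsb;

import java.util.HashMap;
import java.util.Properties;

public final class BasicDBSmokeTest {
  public static void main(String[] args) {
    BasicDB bdb = new BasicDB();
    Properties p = new Properties();
    p.setProperty(BasicDB.VERBOSE, "true");
    bdb.setProperties(p);
    bdb.init();

    // The value map must be typed as ByteIterator, not String.
    HashMap<String, ByteIterator> fields = new HashMap<String, ByteIterator>();
    fields.put("A", new StringByteIterator("X"));
    fields.put("B", new StringByteIterator("Y"));

    bdb.read("table", "key", null, null); // prints: READ table key [ <all fields> ]
    bdb.insert("table", "key", fields);
    bdb.update("table", "key", fields);
    bdb.delete("table", "key");
    bdb.cleanup();
  }
}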
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
scan
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) {<FILL_FUNCTION_BODY>} /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. */ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
delay();
if (verbose) {
  StringBuilder sb = getStringBuilder();
  sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ ");
  if (fields != null) {
    for (String f : fields) {
      sb.append(f).append(" ");
    }
  } else {
    sb.append("<all fields>");
  }
  sb.append("]");
  System.out.println(sb);
}
if (count) {
  incCounter(scans, hash(table, startkey, fields));
}
return Status.OK;
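One detail worth calling out: the uniques hash for scans covers (table, startkey, fields) but not recordcount, so two scans that differ only in length count as a single unique. A minimal sketch of that behavior (ScanUniquesDemo is a hypothetical class name; it assumes the YCSB core classes on the classpath):

package site.ycsb;

import java.util.Properties;

public final class ScanUniquesDemo {
  public static void main(String[] args) {
    BasicDB db = new BasicDB();
    Properties p = new Properties();
    p.setProperty(BasicDB.COUNT, "true");
    p.setProperty(BasicDB.VERBOSE, "false");
    db.setProperties(p);
    db.init();

    db.scan("usertable", "user1", 10, null, null);
    db.scan("usertable", "user1", 500, null, null); // recordcount ignored: same unique
    db.scan("usertable", "user2", 10, null, null);  // a second unique

    db.cleanup(); // prints "[SCANS], Uniques, 2" among the summary lines
  }
}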
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
update
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. */ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
delay();
if (verbose) {
  StringBuilder sb = getStringBuilder();
  sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ ");
  if (values != null) {
    for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
      sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
    }
  }
  sb.append("]");
  System.out.println(sb);
}
if (count) {
  incCounter(updates, hash(table, key, values));
}
return Status.OK;
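Because hash(table, key, values) copies the map into a TreeMap before appending, the update-uniques hash does not depend on the caller's map ordering. A sketch demonstrating this (UpdateHashOrderDemo is hypothetical; it is placed in site.ycsb so the protected hash() is visible):

package site.ycsb;

import java.util.LinkedHashMap;
import java.util.Map;

public final class UpdateHashOrderDemo {
  public static void main(String[] args) {
    BasicDB db = new BasicDB();

    Map<String, ByteIterator> ab = new LinkedHashMap<String, ByteIterator>();
    ab.put("a", new StringByteIterator("1"));
    ab.put("b", new StringByteIterator("2"));

    Map<String, ByteIterator> ba = new LinkedHashMap<String, ByteIterator>();
    ba.put("b", new StringByteIterator("2"));
    ba.put("a", new StringByteIterator("1"));

    // Insertion order differs, but the hash is identical.
    System.out.println(db.hash("t", "k", ab) == db.hash("t", "k", ba)); // true
  }
}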
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
insert
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) {<FILL_FUNCTION_BODY>} /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. 
* @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. */ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
delay();
if (verbose) {
  StringBuilder sb = getStringBuilder();
  sb.append("INSERT ").append(table).append(" ").append(key).append(" [ ");
  if (values != null) {
    for (Map.Entry<String, ByteIterator> entry : values.entrySet()) {
      sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" ");
    }
  }
  sb.append("]");
  System.out.println(sb);
}
if (count) {
  incCounter(inserts, hash(table, key, values));
}
return Status.OK;
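Unlike reads and scans, the insert (and update) hash folds in the field values themselves, so re-sending identical values is one unique while a changed value is a new one; hash() calls reset() on each ByteIterator, which is what makes re-hashing the same map safe. A sketch (InsertUniquesDemo is a hypothetical class name):

package site.ycsb;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public final class InsertUniquesDemo {
  public static void main(String[] args) {
    BasicDB db = new BasicDB();
    Properties p = new Properties();
    p.setProperty(BasicDB.COUNT, "true");
    p.setProperty(BasicDB.VERBOSE, "false");
    db.setProperties(p);
    db.init();

    Map<String, ByteIterator> v1 = new HashMap<String, ByteIterator>();
    v1.put("field0", new StringByteIterator("x"));
    db.insert("usertable", "user1", v1);
    db.insert("usertable", "user1", v1); // same key and values: still one unique

    Map<String, ByteIterator> v2 = new HashMap<String, ByteIterator>();
    v2.put("field0", new StringByteIterator("y"));
    db.insert("usertable", "user1", v2); // changed value: a new unique

    db.cleanup(); // prints "[INSERTS], Uniques, 2" among the summary lines
  }
}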
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
delete
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. 
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) {<FILL_FUNCTION_BODY>} @Override public void cleanup() { synchronized (MUTEX) { int countDown = --counter; if (count && countDown < 1) { // TODO - would be nice to call something like: // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size()); System.out.println("[READS], Uniques, " + reads.size()); System.out.println("[SCANS], Uniques, " + scans.size()); System.out.println("[UPDATES], Uniques, " + updates.size()); System.out.println("[INSERTS], Uniques, " + inserts.size()); System.out.println("[DELETES], Uniques, " + deletes.size()); } } } /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. 
*/ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
delay();
if (verbose) {
  StringBuilder sb = getStringBuilder();
  sb.append("DELETE ").append(table).append(" ").append(key);
  System.out.println(sb);
}
if (count) {
  // No field set for deletes, so hash on table + key directly; this is
  // exactly what hash(table, key, (Set<String>) null) would return.
  incCounter(deletes, (table + key).hashCode());
}
return Status.OK;
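delete() inlines (table + key).hashCode() rather than calling the helper, but for a null field set the helper reduces to exactly that expression, so the two are interchangeable. A trivial check (DeleteHashNote is hypothetical):

package site.ycsb;

import java.util.Set;

public final class DeleteHashNote {
  public static void main(String[] args) {
    BasicDB db = new BasicDB();
    int inlined = ("usertable" + "user1").hashCode();
    int viaHelper = db.hash("usertable", "user1", (Set<String>) null);
    System.out.println(inlined == viaHelper); // true
  }
}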
brianfrankcooper_YCSB
YCSB/core/src/main/java/site/ycsb/BasicDB.java
BasicDB
cleanup
class BasicDB extends DB { public static final String COUNT = "basicdb.count"; public static final String COUNT_DEFAULT = "false"; public static final String VERBOSE = "basicdb.verbose"; public static final String VERBOSE_DEFAULT = "true"; public static final String SIMULATE_DELAY = "basicdb.simulatedelay"; public static final String SIMULATE_DELAY_DEFAULT = "0"; public static final String RANDOMIZE_DELAY = "basicdb.randomizedelay"; public static final String RANDOMIZE_DELAY_DEFAULT = "true"; protected static final Object MUTEX = new Object(); protected static int counter = 0; protected static Map<Integer, Integer> reads; protected static Map<Integer, Integer> scans; protected static Map<Integer, Integer> updates; protected static Map<Integer, Integer> inserts; protected static Map<Integer, Integer> deletes; protected boolean verbose; protected boolean randomizedelay; protected int todelay; protected boolean count; public BasicDB() { todelay = 0; } protected void delay() { if (todelay > 0) { long delayNs; if (randomizedelay) { delayNs = TimeUnit.MILLISECONDS.toNanos(ThreadLocalRandom.current().nextInt(todelay)); if (delayNs == 0) { return; } } else { delayNs = TimeUnit.MILLISECONDS.toNanos(todelay); } final long deadline = System.nanoTime() + delayNs; do { LockSupport.parkNanos(deadline - System.nanoTime()); } while (System.nanoTime() < deadline && !Thread.interrupted()); } } /** * Initialize any state for this DB. * Called once per DB instance; there is one DB instance per client thread. */ public void init() { verbose = Boolean.parseBoolean(getProperties().getProperty(VERBOSE, VERBOSE_DEFAULT)); todelay = Integer.parseInt(getProperties().getProperty(SIMULATE_DELAY, SIMULATE_DELAY_DEFAULT)); randomizedelay = Boolean.parseBoolean(getProperties().getProperty(RANDOMIZE_DELAY, RANDOMIZE_DELAY_DEFAULT)); count = Boolean.parseBoolean(getProperties().getProperty(COUNT, COUNT_DEFAULT)); if (verbose) { synchronized (System.out) { System.out.println("***************** properties *****************"); Properties p = getProperties(); if (p != null) { for (Enumeration e = p.propertyNames(); e.hasMoreElements();) { String k = (String) e.nextElement(); System.out.println("\"" + k + "\"=\"" + p.getProperty(k) + "\""); } } System.out.println("**********************************************"); } } synchronized (MUTEX) { if (counter == 0 && count) { reads = new HashMap<Integer, Integer>(); scans = new HashMap<Integer, Integer>(); updates = new HashMap<Integer, Integer>(); inserts = new HashMap<Integer, Integer>(); deletes = new HashMap<Integer, Integer>(); } counter++; } } protected static final ThreadLocal<StringBuilder> TL_STRING_BUILDER = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected static StringBuilder getStringBuilder() { StringBuilder sb = TL_STRING_BUILDER.get(); sb.setLength(0); return sb; } /** * Read a record from the database. Each field/value pair from the result will be stored in a HashMap. * * @param table The name of the table * @param key The record key of the record to read. 
* @param fields The list of fields to read, or null for all of them * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set<String> fields, Map<String, ByteIterator> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("READ ").append(table).append(" ").append(key).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(reads, hash(table, key, fields)); } return Status.OK; } /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored * in a HashMap. * * @param table The name of the table * @param startkey The record key of the first record to read. * @param recordcount The number of records to read * @param fields The list of fields to read, or null for all of them * @param result A Vector of HashMaps, where each HashMap is a set field/value pairs for one record * @return Zero on success, a non-zero error code on error */ public Status scan(String table, String startkey, int recordcount, Set<String> fields, Vector<HashMap<String, ByteIterator>> result) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("SCAN ").append(table).append(" ").append(startkey).append(" ").append(recordcount).append(" [ "); if (fields != null) { for (String f : fields) { sb.append(f).append(" "); } } else { sb.append("<all fields>"); } sb.append("]"); System.out.println(sb); } if (count) { incCounter(scans, hash(table, startkey, fields)); } return Status.OK; } /** * Update a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key, overwriting any existing values with the same field name. * * @param table The name of the table * @param key The record key of the record to write. * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ public Status update(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("UPDATE ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(updates, hash(table, key, values)); } return Status.OK; } /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the * record with the specified record key. * * @param table The name of the table * @param key The record key of the record to insert. 
* @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ public Status insert(String table, String key, Map<String, ByteIterator> values) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("INSERT ").append(table).append(" ").append(key).append(" [ "); if (values != null) { for (Map.Entry<String, ByteIterator> entry : values.entrySet()) { sb.append(entry.getKey()).append("=").append(entry.getValue()).append(" "); } } sb.append("]"); System.out.println(sb); } if (count) { incCounter(inserts, hash(table, key, values)); } return Status.OK; } /** * Delete a record from the database. * * @param table The name of the table * @param key The record key of the record to delete. * @return Zero on success, a non-zero error code on error */ public Status delete(String table, String key) { delay(); if (verbose) { StringBuilder sb = getStringBuilder(); sb.append("DELETE ").append(table).append(" ").append(key); System.out.println(sb); } if (count) { incCounter(deletes, (table + key).hashCode()); } return Status.OK; } @Override public void cleanup() {<FILL_FUNCTION_BODY>} /** * Increments the count on the hash in the map. * @param map A non-null map to sync and use for incrementing. * @param hash A hash code to increment. */ protected void incCounter(final Map<Integer, Integer> map, final int hash) { synchronized (map) { Integer ctr = map.get(hash); if (ctr == null) { map.put(hash, 1); } else { map.put(hash, ctr + 1); } } } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param fields The fields read or scanned. * @return The hash code. */ protected int hash(final String table, final String key, final Set<String> fields) { if (fields == null) { return (table + key).hashCode(); } StringBuilder buf = getStringBuilder().append(table).append(key); List<String> sorted = new ArrayList<String>(fields); Collections.sort(sorted); for (final String field : sorted) { buf.append(field); } return buf.toString().hashCode(); } /** * Hashes the table, key and fields, sorting the fields first for a consistent * hash. * Note that this is expensive as we generate a copy of the fields and a string * buffer to hash on. Hashing on the objects is problematic. * @param table The user table. * @param key The key read or scanned. * @param values The values to hash on. * @return The hash code. 
*/ protected int hash(final String table, final String key, final Map<String, ByteIterator> values) { if (values == null) { return (table + key).hashCode(); } final TreeMap<String, ByteIterator> sorted = new TreeMap<String, ByteIterator>(values); StringBuilder buf = getStringBuilder().append(table).append(key); for (final Entry<String, ByteIterator> entry : sorted.entrySet()) { entry.getValue().reset(); buf.append(entry.getKey()) .append(entry.getValue().toString()); } return buf.toString().hashCode(); } /** * Short test of BasicDB */ /* public static void main(String[] args) { BasicDB bdb = new BasicDB(); Properties p = new Properties(); p.setProperty("Sky", "Blue"); p.setProperty("Ocean", "Wet"); bdb.setProperties(p); bdb.init(); HashMap<String, String> fields = new HashMap<String, ByteIterator>(); fields.put("A", new StringByteIterator("X")); fields.put("B", new StringByteIterator("Y")); bdb.read("table", "key", null, null); bdb.insert("table", "key", fields); fields = new HashMap<String, ByteIterator>(); fields.put("C", new StringByteIterator("Z")); bdb.update("table", "key", fields); bdb.delete("table", "key"); } */ }
synchronized (MUTEX) {
  int countDown = --counter;
  if (count && countDown < 1) {
    // Only the last instance to clean up prints the uniques summary.
    // TODO - would be nice to call something like:
    // Measurements.getMeasurements().oneOffMeasurement("READS", "Uniques", reads.size());
    System.out.println("[READS], Uniques, " + reads.size());
    System.out.println("[SCANS], Uniques, " + scans.size());
    System.out.println("[UPDATES], Uniques, " + updates.size());
    System.out.println("[INSERTS], Uniques, " + inserts.size());
    System.out.println("[DELETES], Uniques, " + deletes.size());
  }
}
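Since the counter and count maps are static, the uniques summary appears exactly once per JVM: when the last DB instance (YCSB creates one per client thread) finishes cleanup. A sketch of that ordering (CleanupOrderDemo is a hypothetical class name):

package site.ycsb;

import java.util.Properties;

public final class CleanupOrderDemo {
  public static void main(String[] args) {
    Properties p = new Properties();
    p.setProperty(BasicDB.COUNT, "true");
    p.setProperty(BasicDB.VERBOSE, "false");

    BasicDB a = new BasicDB();
    a.setProperties(p);
    a.init(); // counter -> 1, count maps allocated

    BasicDB b = new BasicDB();
    b.setProperties(p);
    b.init(); // counter -> 2

    a.read("usertable", "user1", null, null);
    a.cleanup(); // counter -> 1, prints nothing
    b.cleanup(); // counter -> 0, prints the uniques summary
  }
}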