| id | content |
|---|---|
codereview_new_java_data_4508
|
private static void cleanAllOld(ServerContext context, final ZooReaderWriter zk)
private static boolean checkCurrentInstance(ServerContext context, String instanceName,
String instanceId) {
if (instanceId.equals(context.getInstanceID().canonical())) {
- String prompt = String.valueOf(
- System.console().readLine("Warning: This is the current instance, are you sure? Y/n: "));
- if (prompt == null || !prompt.equals("Y")) {
System.out.println("Instance deletion of '" + instanceName + "' cancelled.");
- return false;
}
}
- return true;
}
private static String getRootChildPath(String child) {
I know in other areas (like when deleting a table or namespace with the shell) we use (yes|no) and we also ignore case. Not really sure how consistent we are with that, though. At least ignoring case would be a reasonable expectation from the user's viewpoint.
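A minimal sketch of the case-insensitive check the comment asks for; the helper name is hypothetical, not part of the actual change:
```java
// Hypothetical helper: accepts "y"/"yes" in any case, treating null or
// anything else as a refusal.
private static boolean isAffirmative(String line) {
  return line != null
      && (line.equalsIgnoreCase("y") || line.equalsIgnoreCase("yes"));
}
```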
private static void cleanAllOld(ServerContext context, final ZooReaderWriter zk)
private static boolean checkCurrentInstance(ServerContext context, String instanceName,
String instanceId) {
+ boolean operate = true;
+ // If the instance given is the current instance we should verify the user actually wants to
+ // delete
if (instanceId.equals(context.getInstanceID().canonical())) {
+ String line = String.valueOf(System.console()
+ .readLine("Warning: This is the current instance, are you sure? (yes|no): "));
+ operate = line != null && (line.equalsIgnoreCase("y") || line.equalsIgnoreCase("yes"));
+ if (!operate) {
System.out.println("Instance deletion of '" + instanceName + "' cancelled.");
}
}
+ return operate;
}
private static String getRootChildPath(String child) {
|
codereview_new_java_data_4509
|
Fate<Manager> fate() {
+ " at time " + System.currentTimeMillis();
// include stack trace so we know where it's coming from, in case we need to troubleshoot it
log.warn("{} blocked until fate starts", msgPrefix,
- new IllegalStateException("Attempted fate action before fate was started; "
+ "if this doesn't make progress, please report it as a bug to the developers"));
try {
fateReadyLatch.await();
I like this message better coming first, after the msgPrefix, and I don't think you need the IllegalStateException.
```suggestion
"Attempted fate action before manager finished starting up; "
```
Fate<Manager> fate() {
+ " at time " + System.currentTimeMillis();
// include stack trace so we know where it's coming from, in case we need to troubleshoot it
log.warn("{} blocked until fate starts", msgPrefix,
+ "Attempted fate action before manager finished starting up; "
+ "if this doesn't make progress, please report it as a bug to the developers"));
try {
fateReadyLatch.await();
|
codereview_new_java_data_4510
|
void modifyProperties(Consumer<Map<String,String>> mapMutator) throws AccumuloEx
Map<String,String> getSystemConfiguration() throws AccumuloException, AccumuloSecurityException;
/**
- * Retrieves the configured System properties from zookeeper. This method is different from
- * {@link #getSystemConfiguration()} as it will only return the configured properties in zookeeper
- * and not properties from accumulo.properties or defaults.
*
- * @return a map of system properties set in zookeeper
*/
- Map<String,String> getSystemProperties() throws AccumuloException, AccumuloSecurityException;
/**
* Retrieve the site configuration (that is set in the server configuration file).
Would renaming this to something like `getStoredProperties` help in highlighting the difference between the methods?
void modifyProperties(Consumer<Map<String,String>> mapMutator) throws AccumuloEx
Map<String,String> getSystemConfiguration() throws AccumuloException, AccumuloSecurityException;
/**
+ * Retrieves the stored System properties from zookeeper. This method is different from
+ * {@link #getSystemConfiguration()} as it will only return the stored properties in zookeeper and
+ * not properties from accumulo.properties or default values..
*
+ * @return a map of stored system properties set in zookeeper
*/
+ Map<String,String> getStoredProperties() throws AccumuloException, AccumuloSecurityException;
/**
* Retrieve the site configuration (that is set in the server configuration file).
|
codereview_new_java_data_4511
|
void modifyProperties(Consumer<Map<String,String>> mapMutator) throws AccumuloEx
*/
Map<String,String> getSystemConfiguration() throws AccumuloException, AccumuloSecurityException;
- /**
- * Retrieves the stored System properties from zookeeper. This method is different from
- * {@link #getSystemConfiguration()} as it will only return the stored properties in zookeeper and
- * not properties from accumulo.properties or default values..
- *
- * @return a map of stored system properties set in zookeeper
- */
- Map<String,String> getStoredProperties() throws AccumuloException, AccumuloSecurityException;
-
/**
* Retrieve the site configuration (that is set in the server configuration file).
*
Again, the word `stored` here does not convey how these properties are different from the ones returned by `getSystemConfiguration`. The user will need to look at the documentation to know, so IMO we should leave the name as `getSystemProperties`.
void modifyProperties(Consumer<Map<String,String>> mapMutator) throws AccumuloEx
*/
Map<String,String> getSystemConfiguration() throws AccumuloException, AccumuloSecurityException;
/**
* Retrieve the site configuration (that is set in the server configuration file).
*
|
codereview_new_java_data_4512
|
void setProperty(final String property, final String value)
* accepted.
*
* <p>
- * Accumulo has multiple layers of properties that for many APIs and SPIs presented as single
- * merged view. This API does not offer that merged view, it only offers the properties set at the
- * system layer to the mapMutator.
* </p>
*
* <p>
... APIs and SPIs **are** presented as **a** single merged view.
void setProperty(final String property, final String value)
* accepted.
*
* <p>
+ * Accumulo has multiple layers of properties that for many APIs and SPIs are presented as a
+ * single merged view. This API does not offer that merged view, it only offers the properties set
+ * at the system layer to the mapMutator.
* </p>
*
* <p>
|
codereview_new_java_data_4513
|
public int execute(final String fullCommand, final CommandLine cl, final Shell s
final Map<String,String> configuration = shellState.getAccumuloClient().tableOperations()
.getConfiguration(cl.getOptionValue(createTableOptCopyConfig.getOpt()));
- Map<String,
- String> propsToAdd = configuration.entrySet().stream()
- .filter(entry -> Property.isValidTablePropertyKey(entry.getKey()))
- .collect(Collectors.toMap(Entry::getKey, Entry::getValue));
-
shellState.getAccumuloClient().tableOperations().modifyProperties(tableName,
- properties -> properties.putAll(propsToAdd));
}
}
```suggestion
shellState.getAccumuloClient().tableOperations().modifyProperties(tableName,
properties -> configuration.entrySet().stream()
.filter(entry -> Property.isValidTablePropertyKey(entry.getKey()))
.forEach(entry -> properties.put(entry.getKey(), entry.getValue())));
```
public int execute(final String fullCommand, final CommandLine cl, final Shell s
final Map<String,String> configuration = shellState.getAccumuloClient().tableOperations()
.getConfiguration(cl.getOptionValue(createTableOptCopyConfig.getOpt()));
shellState.getAccumuloClient().tableOperations().modifyProperties(tableName,
+ properties -> configuration.entrySet().stream()
+ .filter(entry -> Property.isValidTablePropertyKey(entry.getKey()))
+ .forEach(entry -> properties.put(entry.getKey(), entry.getValue())));
}
}
|
codereview_new_java_data_4514
|
private static String getFmtTime(final long epoch) {
/**
* Get the ZooKeeper digest based on the instance secret that is used within ZooKeeper for
- * authentication. This method is primary intended to be used to valid ZooKeeper ACLs. Use
* {@link #digestAuth(ZooKeeper, String)} to add authorizations to ZooKeeper.
*/
- public static Id getZkAuthId(final String secret) {
try {
final String scheme = "digest";
String auth = DigestAuthenticationProvider.generateDigest("accumulo:" + secret);
```suggestion
* authentication. This method is primary intended to be used to validate ZooKeeper ACLs. Use
```
private static String getFmtTime(final long epoch) {
/**
* Get the ZooKeeper digest based on the instance secret that is used within ZooKeeper for
+ * authentication. This method is primary intended to be used to validate ZooKeeper ACLs. Use
* {@link #digestAuth(ZooKeeper, String)} to add authorizations to ZooKeeper.
*/
+ public static Id getZkDigestAuthId(final String secret) {
try {
final String scheme = "digest";
String auth = DigestAuthenticationProvider.generateDigest("accumulo:" + secret);
|
codereview_new_java_data_4515
|
private static String getFmtTime(final long epoch) {
/**
* Get the ZooKeeper digest based on the instance secret that is used within ZooKeeper for
- * authentication. This method is primary intended to be used to valid ZooKeeper ACLs. Use
* {@link #digestAuth(ZooKeeper, String)} to add authorizations to ZooKeeper.
*/
- public static Id getZkAuthId(final String secret) {
try {
final String scheme = "digest";
String auth = DigestAuthenticationProvider.generateDigest("accumulo:" + secret);
```suggestion
public static Id getZkDigestAuthId(final String secret) {
```
private static String getFmtTime(final long epoch) {
/**
* Get the ZooKeeper digest based on the instance secret that is used within ZooKeeper for
+ * authentication. This method is primary intended to be used to validate ZooKeeper ACLs. Use
* {@link #digestAuth(ZooKeeper, String)} to add authorizations to ZooKeeper.
*/
+ public static Id getZkDigestAuthId(final String secret) {
try {
final String scheme = "digest";
String auth = DigestAuthenticationProvider.generateDigest("accumulo:" + secret);
|
codereview_new_java_data_4516
|
public static void execute(final ServerContext context, final boolean clean,
}
if (clean) {
- // If clean is set to true thenn a specific instance should not be set
if (instance != null) {
throw new IllegalArgumentException(
"Cannot set clean flag to true and also an instance name");
```suggestion
// If clean is set to true then a specific instance should not be set
```
public static void execute(final ServerContext context, final boolean clean,
}
if (clean) {
+ // If clean is set to true then a specific instance should not be set
if (instance != null) {
throw new IllegalArgumentException(
"Cannot set clean flag to true and also an instance name");
|
codereview_new_java_data_4517
|
public boolean failTx(AdminUtil<FateCommand> admin, ZooStore<FateCommand> zs, Zo
@Override
public String description() {
- return "WARNING: This command is deprecated for removal";
}
@Override
```suggestion
return "manage FATE transactions (WARNING: This command is deprecated for removal)";
```
public boolean failTx(AdminUtil<FateCommand> admin, ZooStore<FateCommand> zs, Zo
@Override
public String description() {
+ return "manage FATE transactions (WARNING: This command is deprecated for removal)";
}
@Override
|
codereview_new_java_data_4518
|
static class FateOpsCommand {
@Parameter(names = "--summary", description = "Print a summary of FaTE transaction information")
boolean summarize = false;
- @Parameter(names = {"-j", "--json"}, description = "Print Operations in json")
boolean printJson = false;
@Parameter(names = {"-s", "--states"},
Should "Operations" be "transaction". I don't think we call them operations anywhere.
static class FateOpsCommand {
@Parameter(names = "--summary", description = "Print a summary of FaTE transaction information")
boolean summarize = false;
+ @Parameter(names = {"-j", "--json"}, description = "Print transactions in json")
boolean printJson = false;
@Parameter(names = {"-s", "--states"},
|
codereview_new_java_data_4519
|
public void testGetTimeType() throws Exception {
exec("createtable tmtype", true);
exec("gettimetype", true, TimeType.MILLIS.toString());
exec("gettimetype -t tmtype", true, TimeType.MILLIS.toString());
exec("gettimetype -t accumulo.metadata", true, TimeType.LOGICAL.toString());
exec("gettimetype -t accumulo.root", true, TimeType.LOGICAL.toString());
exec("gettimetype -t notable", false);
Could also try a user table with logical time to make the test cover a bit more.
```suggestion
exec("gettimetype -t tmtype", true, TimeType.MILLIS.toString());
exec("createtable -tl logicaltt", true);
exec("gettimetype -t logicaltt", true, TimeType.LOGICAL.toString());
```
public void testGetTimeType() throws Exception {
exec("createtable tmtype", true);
exec("gettimetype", true, TimeType.MILLIS.toString());
exec("gettimetype -t tmtype", true, TimeType.MILLIS.toString());
+ exec("createtable -tl logicaltt", true);
+ exec("gettimetype -t logicaltt", true, TimeType.LOGICAL.toString());
exec("gettimetype -t accumulo.metadata", true, TimeType.LOGICAL.toString());
exec("gettimetype -t accumulo.root", true, TimeType.LOGICAL.toString());
exec("gettimetype -t notable", false);
|
codereview_new_java_data_4520
|
public TimeType getTimeType(final String tableName) throws TableNotFoundExceptio
Optional<TabletMetadata> tabletMetadata = context.getAmple().readTablets().forTable(tableId)
.fetch(TabletMetadata.ColumnType.TIME).checkConsistency().build().stream().findFirst();
TabletMetadata timeData =
- tabletMetadata.orElseThrow(() -> new RuntimeException("Failed to retrieve TimeType"));
return timeData.getTime().getType();
}
Prefer a more specific RTE type. Perhaps IllegalStateException here?
public TimeType getTimeType(final String tableName) throws TableNotFoundExceptio
Optional<TabletMetadata> tabletMetadata = context.getAmple().readTablets().forTable(tableId)
.fetch(TabletMetadata.ColumnType.TIME).checkConsistency().build().stream().findFirst();
TabletMetadata timeData =
+ tabletMetadata.orElseThrow(() -> new IllegalStateException("Failed to retrieve TimeType"));
return timeData.getTime().getType();
}
|
codereview_new_java_data_4521
|
protected void merge(AccumuloClient client, String table, List<Size> sizes, int
Text start = sizes.get(0).extent.prevEndRow();
Text end = sizes.get(numToMerge - 1).extent.endRow();
message("Merging %d tablets from (%s to %s]", numToMerge,
- start == null ? "-inf" : Key.toPrintableString(start.getBytes(), 0, start.getLength(), end.getLength()),
end == null ? "+inf" : Key.toPrintableString(end.getBytes(), 0, end.getLength(), end.getLength()));
client.tableOperations().merge(table, start, end);
} catch (Exception ex) {
I believe the last parameter should be `start.getLength()`.
protected void merge(AccumuloClient client, String table, List<Size> sizes, int
Text start = sizes.get(0).extent.prevEndRow();
Text end = sizes.get(numToMerge - 1).extent.endRow();
message("Merging %d tablets from (%s to %s]", numToMerge,
+ start == null ? "-inf" : Key.toPrintableString(start.getBytes(), 0, start.getLength(), start.getLength()),
end == null ? "+inf" : Key.toPrintableString(end.getBytes(), 0, end.getLength(), end.getLength()));
client.tableOperations().merge(table, start, end);
} catch (Exception ex) {
|
codereview_new_java_data_4522
|
private String createTableAndReturnTableName(AccumuloClient client) throws Accum
} catch (TableExistsException e) {
log.debug("Table {} already exists. Deleting and trying again.", tableName);
client.tableOperations().delete(tableName);
- createTableAndReturnTableName(client);
}
// when the table is successfully created, return its name
return tableName;
Do the tableOperations calls wait for the operation to complete, or do they create a FATE op and return? If the FATE op is only launched, this could end up in a race condition and throw TableNotFound or another exception: if a FATE op was in progress, the create fails because the table is there, the FATE op completes and removes the table, and then the delete fails because it does not exist. Another strategy may be to pull the create/delete into functions that retry a few times with a short pause.
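A minimal sketch of the retry-and-pause strategy suggested above; the wrapper name, attempt count, and pause length are hypothetical, not the project's actual code:
```java
// Hypothetical retry wrapper: tolerate TableExistsException a few times in
// case an in-progress FATE operation is still removing the table.
private static void createTableWithRetry(AccumuloClient client, String tableName)
    throws Exception {
  final int maxAttempts = 3;
  for (int attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      client.tableOperations().create(tableName);
      return;
    } catch (TableExistsException e) {
      if (attempt == maxAttempts) {
        throw e;
      }
      Thread.sleep(1000); // pause a little before retrying
    }
  }
}
```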
private String createTableAndReturnTableName(AccumuloClient client) throws Accum
} catch (TableExistsException e) {
log.debug("Table {} already exists. Deleting and trying again.", tableName);
client.tableOperations().delete(tableName);
+ tableName = createTableAndReturnTableName(client);
}
// when the table is successfully created, return its name
return tableName;
|
codereview_new_java_data_4523
|
private String createTableAndReturnTableName(AccumuloClient client) throws Accum
} catch (TableExistsException e) {
log.debug("Table {} already exists. Deleting and trying again.", tableName);
client.tableOperations().delete(tableName);
- createTableAndReturnTableName(client);
}
// when the table is successfully created, return its name
return tableName;
```suggestion
tableName = createTableAndReturnTableName(client);
```
private String createTableAndReturnTableName(AccumuloClient client) throws Accum
} catch (TableExistsException e) {
log.debug("Table {} already exists. Deleting and trying again.", tableName);
client.tableOperations().delete(tableName);
+ tableName = createTableAndReturnTableName(client);
}
// when the table is successfully created, return its name
return tableName;
|
codereview_new_java_data_4524
|
* and other actions in the cluster can impact the estimated size such as flushes, tablet splits,
* compactions, etc.
*
- * For the most accurate information a compaction should first be run on the set of tables being
- * computed.
*/
public class TableDiskUsage {
```suggestion
* For more accurate information a compaction should first be run on all of the files for the set of tables being
```
* and other actions in the cluster can impact the estimated size such as flushes, tablet splits,
* compactions, etc.
*
+ * For more accurate information a compaction should first be run on all files for the set of tables
+ * being computed.
*/
public class TableDiskUsage {
|
codereview_new_java_data_4525
|
class PopulateMetadataTable extends ManagerRepo {
static void readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo, String importDir,
Map<String,String> fileNameMappings) throws Exception {
- try (BufferedReader in = new BufferedReader(
- new InputStreamReader(fs.open(new Path(importDir, IMPORT_MAPPINGS_FILE)), UTF_8))) {
String line, prev;
while ((line = in.readLine()) != null) {
String[] sa = line.split(":", 2);
I'm pretty sure that with multiple resources in a try-with-resources block, you need to separate their initialization in order for their `close()` to be called correctly (as opposed to creating an object in another object's constructor, like what's going on here). So in this case, creating the `InputStreamReader` object followed by a semicolon, then creating the `BufferedReader`, would allow each object's `close` to be called correctly. I know you didn't set it up like this in this PR, but might as well fix it while editing the same line.
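A minimal, self-contained illustration of the difference (class and method names are illustrative only): only resources declared as their own try-with-resources variables are guaranteed to be closed if a later constructor throws.
```java
import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

class TryWithResourcesSketch {
  // Nested form: only the outermost BufferedReader is a declared resource.
  // If a wrapping constructor threw, the already-opened stream would leak.
  static String firstLineNested(InputStream raw) throws IOException {
    try (BufferedReader in = new BufferedReader(new InputStreamReader(raw, UTF_8))) {
      return in.readLine();
    }
  }

  // Separated form: each resource is declared individually, so close() is
  // called on each one, in reverse declaration order, even if a later
  // constructor throws.
  static String firstLineSeparated(InputStream raw) throws IOException {
    try (InputStreamReader isr = new InputStreamReader(raw, UTF_8);
        BufferedReader in = new BufferedReader(isr)) {
      return in.readLine();
    }
  }
}
```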
class PopulateMetadataTable extends ManagerRepo {
static void readMappingFile(VolumeManager fs, ImportedTableInfo tableInfo, String importDir,
Map<String,String> fileNameMappings) throws Exception {
+ try (var fsDis = fs.open(new Path(importDir, IMPORT_MAPPINGS_FILE));
+ var isr = new InputStreamReader(fsDis, UTF_8);
+ BufferedReader in = new BufferedReader(isr)) {
String line, prev;
while ((line = in.readLine()) != null) {
String[] sa = line.split(":", 2);
|
codereview_new_java_data_4526
|
public void testExportImportOffline() throws Exception {
fs.deleteOnExit(importDirA);
fs.deleteOnExit(importDirB);
for (Path p : new Path[] {exportDir, importDirA, importDirB}) {
- assertTrue(fs.mkdirs(p), "Failed to create " + baseDir);
}
Set<String> importDirs = Set.of(importDirA.toString(), importDirB.toString());
```suggestion
assertTrue(fs.mkdirs(p), "Failed to create " + p);
```
public void testExportImportOffline() throws Exception {
fs.deleteOnExit(importDirA);
fs.deleteOnExit(importDirB);
for (Path p : new Path[] {exportDir, importDirA, importDirB}) {
+ assertTrue(fs.mkdirs(p), "Failed to create " + p);
}
Set<String> importDirs = Set.of(importDirA.toString(), importDirB.toString());
|
codereview_new_java_data_4527
|
default void importTable(String tableName, String importDir)
* A set of directories containing the files copied by distcp from exportTable
* @since 2.1.0
*/
- void importTable(String tableName, ImportConfiguration ic, Set<String> importDirs)
throws TableExistsException, AccumuloException, AccumuloSecurityException;
/**
Conceptually ImportConfiguration allows expressing one or more optional arguments. Could place the two required arguments first and the optional ones last. This is just personal preference, please ignore if it does not resonate.
```suggestion
void importTable(String tableName, Set<String> importDirs, ImportConfiguration ic)
```
default void importTable(String tableName, String importDir)
* A set of directories containing the files copied by distcp from exportTable
* @since 2.1.0
*/
+ void importTable(String tableName, Set<String> importDirs, ImportConfiguration ic)
throws TableExistsException, AccumuloException, AccumuloSecurityException;
/**
|
codereview_new_java_data_4528
|
private static boolean createDirs(VolumeManager fs, InstanceId instanceId, Set<S
} else {
success = fs.createNewFile(iidPath);
// the exists() call provides positive check that the instanceId file is present
- if (!success || fs.exists(iidPath)) {
log.info("Created instanceId file {} in hdfs", iidPath);
} else {
- log.warn("Failed to create instanceId file {} in hdfs", iidPath);
}
}
}
Assuming `createNewFile()` can return false without throwing an error, I think there is still a problem with this logic. For example, if the `iidPath` doesn't exist but `fs.createNewFile(iidPath)` returns false, `!success` will cause the `if` to short-circuit and print a misleading message.
Since we aren't quite sure of the behavior of these method calls, you could pull this chunk of logic out into a separate static method that could be tested in a unit test.
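A minimal sketch of such an extraction, assuming Hadoop's `FileSystem` API; the class and method names are hypothetical:
```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class InstanceIdSketch {
  // Hypothetical extraction: report success only when the file was both
  // reported created and is observably present afterwards.
  static boolean createAndVerifyInstanceIdFile(FileSystem fs, Path iidPath)
      throws IOException {
    boolean created = fs.createNewFile(iidPath);
    boolean present = fs.exists(iidPath);
    return created && present;
  }
}
```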
private static boolean createDirs(VolumeManager fs, InstanceId instanceId, Set<S
} else {
success = fs.createNewFile(iidPath);
// the exists() call provides positive check that the instanceId file is present
+ if (success && fs.exists(iidPath)) {
log.info("Created instanceId file {} in hdfs", iidPath);
} else {
+ log.warn("May have failed to create instanceId file {} in hdfs", iidPath);
}
}
}
|
codereview_new_java_data_4529
|
private void verifyUp() throws InterruptedException, IOException {
String secret = getSiteConfiguration().get(Property.INSTANCE_SECRET);
while (!(zk.getState() == States.CONNECTED)) {
- log.info("Waiting for ZK client to connect, state: {}", zk.getState());
Thread.sleep(1000);
}
```suggestion
log.info("Waiting for ZK client to connect, state: {} - will retry", zk.getState());
```
private void verifyUp() throws InterruptedException, IOException {
String secret = getSiteConfiguration().get(Property.INSTANCE_SECRET);
while (!(zk.getState() == States.CONNECTED)) {
+ log.info("Waiting for ZK client to connect, state: {} - will retry", zk.getState());
Thread.sleep(1000);
}
|
codereview_new_java_data_4530
|
enum TStatus {
*
* @param tid
* transaction id, previously reserved.
- * @param prop
- * name of property to retrieve.
*/
- Serializable getNodeData(long tid, Fate.NodeData prop);
/**
* list all transaction ids in store.
`nodeData` instead of `prop` ?
enum TStatus {
*
* @param tid
* transaction id, previously reserved.
+ * @param txInfo
+ * name of attribute of a transaction to retrieve.
*/
+ Serializable getTransactionInfo(long tid, Fate.TxInfo txInfo);
/**
* list all transaction ids in store.
|
codereview_new_java_data_4531
|
void setStatus(long tid, TStatus status);
/**
- * Set a transaction-specific property.
*
* @param tid
* transaction id
```suggestion
* Set transaction-specific information.
```
void setStatus(long tid, TStatus status);
/**
+ * Set transaction-specific information.
*
* @param tid
* transaction id
|
codereview_new_java_data_4532
|
public static long fromString(String fmtTid) {
*/
public static String formatTid(long tid) {
// do not change how this formats without considering implications for persistence
- // original format: String.format("%s%016x%s", PREFIX, tid, SUFFIX);
- // Since 2.1, this format was replaced with the faster version below
return FastFormat.toHexString(PREFIX, tid, SUFFIX);
}
Is this a change in format, or just a change in the method called to do the formatting? If it's the former, I'm curious what's different in the new format. If it's the latter, I'm not sure this comment about the original vs. the replacement implementation is very helpful, and it might be misleading.
public static long fromString(String fmtTid) {
*/
public static String formatTid(long tid) {
// do not change how this formats without considering implications for persistence
return FastFormat.toHexString(PREFIX, tid, SUFFIX);
}
|
codereview_new_java_data_4533
|
public void run() {
// check the time so that the read ahead thread is not monopolized
while (iter.hasNext() && bytesAdded < maxResultsSize
&& (System.currentTimeMillis() - startTime) < maxScanTime) {
- final KeyExtent extent;
- final List<Range> ranges;
- {
- final Entry<KeyExtent,List<Range>> entry = iter.next();
- extent = entry.getKey();
- ranges = entry.getValue();
- }
iter.remove();
Was this little code block created just to narrowly scope `entry`?
public void run() {
// check the time so that the read ahead thread is not monopolized
while (iter.hasNext() && bytesAdded < maxResultsSize
&& (System.currentTimeMillis() - startTime) < maxScanTime) {
+
+ final Entry<KeyExtent,List<Range>> entry = iter.next();
+ final KeyExtent extent = entry.getKey();
+ final List<Range> ranges = entry.getValue();
iter.remove();
|
codereview_new_java_data_4534
|
public void readFields(DataInput in) throws IOException {
final byte[] row, cf, cq, cv;
final long ts;
- row = getData(in, ROW_SAME, ROW_COMMON_PREFIX, () -> prevKey.getRowData());
- cf = getData(in, CF_SAME, CF_COMMON_PREFIX, () -> prevKey.getColumnFamilyData());
- cq = getData(in, CQ_SAME, CQ_COMMON_PREFIX, () -> prevKey.getColumnQualifierData());
- cv = getData(in, CV_SAME, CV_COMMON_PREFIX, () -> prevKey.getColumnVisibilityData());
if ((fieldsSame & TS_SAME) == TS_SAME) {
ts = prevKey.getTimestamp();
I love the use of lambdas, but I think these can just use method references, like `prevKey::getColumnFamilyData`, right?
public void readFields(DataInput in) throws IOException {
final byte[] row, cf, cq, cv;
final long ts;
+ row = getData(in, ROW_SAME, ROW_COMMON_PREFIX, prevKey::getRowData);
+ cf = getData(in, CF_SAME, CF_COMMON_PREFIX, prevKey::getColumnFamilyData);
+ cq = getData(in, CQ_SAME, CQ_COMMON_PREFIX, prevKey::getColumnQualifierData);
+ cv = getData(in, CV_SAME, CV_COMMON_PREFIX, prevKey::getColumnVisibilityData);
if ((fieldsSame & TS_SAME) == TS_SAME) {
ts = prevKey.getTimestamp();
|
codereview_new_java_data_4535
|
Map<String,String> getConfiguration(String tableName)
* Gets per-table properties of a table. This operation is asynchronous and eventually consistent.
* It is not guaranteed that all tablets in a table will return the same values. Within a few
* seconds without another change, all tablets in a table should be consistent. The clone table
- * feature can be used if consistency is required. This new method returns a Map instead of an
- * Iterable.
*
* @param tableName
* the name of the table
Do we need to say "This new method returns a Map instead of an Iterable."? If we leave this in place (in multiple places), maybe we should add something to say why?
Map<String,String> getConfiguration(String tableName)
* Gets per-table properties of a table. This operation is asynchronous and eventually consistent.
* It is not guaranteed that all tablets in a table will return the same values. Within a few
* seconds without another change, all tablets in a table should be consistent. The clone table
+ * feature can be used if consistency is required.
*
* @param tableName
* the name of the table
|
codereview_new_java_data_4536
|
public interface PropStore {
void putAll(PropStoreKey<?> propStoreKey, Map<String,String> props);
/**
- * Replaces all current properties with map provided If a property is not included in the new map,
- * the property will not be set.
*
* @param propStoreKey
* the prop cache key
```suggestion
* Replaces all current properties with map provided. If a property is not included in the new map,
```
public interface PropStore {
void putAll(PropStoreKey<?> propStoreKey, Map<String,String> props);
/**
+ * Replaces all current properties with map provided. If a property is not included in the new
+ * map, the property will not be set.
*
* @param propStoreKey
* the prop cache key
|
codereview_new_java_data_4537
|
public interface GarbageCollectionEnvironment {
/**
* Return a list of TableIDs for which we are considering deletes. For the root table this would
- * be the metadata table. For the metadata table, this would be all of the other tables in the
- * system.
*
* @return The table ids
*/
```suggestion
* be the metadata table. For the metadata table, this would be the other tables in the
```
public interface GarbageCollectionEnvironment {
/**
* Return a list of TableIDs for which we are considering deletes. For the root table this would
+ * be the metadata table. For the metadata table, this would be the other tables in the system.
*
* @return The table ids
*/
|
codereview_new_java_data_4538
|
private long removeBlipCandidates(GarbageCollectionEnvironment gce,
@VisibleForTesting
/**
- *
*/
protected void ensureAllTablesChecked(Set<TableId> tableIdsBefore, Set<TableId> tableIdsSeen,
Set<TableId> tableIdsAfter) {
// if a table was added or deleted during this run, it is acceptable to not
// have seen those tables ids when scanning the metadata table. So get the intersection
- Set<TableId> tableIdsMustHaveSeen = new HashSet<>(tableIdsBefore);
tableIdsMustHaveSeen.retainAll(tableIdsAfter);
if (tableIdsMustHaveSeen.isEmpty() && !tableIdsSeen.isEmpty()) {
```suggestion
Set<TableId> tableIdsMustHaveSeen = Sets.intersection(tableIdsBefore, tableIdsAfter);
```
Could use this here to make it a bit more concise and match the comment.
private long removeBlipCandidates(GarbageCollectionEnvironment gce,
@VisibleForTesting
/**
+ * Double check no tables were missed during GC
*/
protected void ensureAllTablesChecked(Set<TableId> tableIdsBefore, Set<TableId> tableIdsSeen,
Set<TableId> tableIdsAfter) {
// if a table was added or deleted during this run, it is acceptable to not
// have seen those tables ids when scanning the metadata table. So get the intersection
+ final Set<TableId> tableIdsMustHaveSeen = new HashSet<>(tableIdsBefore);
tableIdsMustHaveSeen.retainAll(tableIdsAfter);
if (tableIdsMustHaveSeen.isEmpty() && !tableIdsSeen.isEmpty()) {
|
codereview_new_java_data_4539
|
public long getCandidatesStat() {
return candidates;
}
- @Override
- public boolean isRootTable() {
- return level == DataLevel.ROOT;
- }
-
- @Override
- public boolean isMetadataTable() {
- return level == DataLevel.METADATA;
- }
-
@Override
public Set<TableId> getCandidateTableIDs() {
- if (isRootTable()) {
return Collections.singleton(MetadataTable.ID);
- } else if (isMetadataTable()) {
Set<TableId> tableIds = new HashSet<>(getTableIDs());
tableIds.remove(MetadataTable.ID);
tableIds.remove(RootTable.ID);
This feels a bit off. Think it might need to be the following.
```suggestion
if (level == ROOT) {
return Set.of(RootTable.ID);
} else if(level == METADATA){
return Set.of(MetadataTable.ID);
} else if (level == USER) {
```
public long getCandidatesStat() {
return candidates;
}
@Override
public Set<TableId> getCandidateTableIDs() {
+ if (level == DataLevel.ROOT) {
return Collections.singleton(MetadataTable.ID);
+ } else if (level == DataLevel.METADATA) {
Set<TableId> tableIds = new HashSet<>(getTableIDs());
tableIds.remove(MetadataTable.ID);
tableIds.remove(RootTable.ID);
|
codereview_new_java_data_4540
|
public long getCandidatesStat() {
return candidates;
}
- @Override
- public boolean isRootTable() {
- return level == DataLevel.ROOT;
- }
-
- @Override
- public boolean isMetadataTable() {
- return level == DataLevel.METADATA;
- }
-
@Override
public Set<TableId> getCandidateTableIDs() {
- if (isRootTable()) {
return Collections.singleton(MetadataTable.ID);
- } else if (isMetadataTable()) {
Set<TableId> tableIds = new HashSet<>(getTableIDs());
tableIds.remove(MetadataTable.ID);
tableIds.remove(RootTable.ID);
Using level in the name seems better to me.
```suggestion
/**
* @return the table ids for the current data level
*/
public Set<TableId> getLevelTableIDs() {
```
public long getCandidatesStat() {
return candidates;
}
@Override
public Set<TableId> getCandidateTableIDs() {
+ if (level == DataLevel.ROOT) {
return Collections.singleton(MetadataTable.ID);
+ } else if (level == DataLevel.METADATA) {
Set<TableId> tableIds = new HashSet<>(getTableIDs());
tableIds.remove(MetadataTable.ID);
tableIds.remove(RootTable.ID);
|
codereview_new_java_data_4541
|
public interface GarbageCollectionEnvironment {
Stream<Reference> getReferences();
/**
- * Return a list of TableIDs for which we are considering deletes. For the root table this would
- * be the metadata table. For the metadata table, this would be the other tables in the system.
*
* @return The table ids
*/
```suggestion
* Return a list of all TableIDs in the
* {@link org.apache.accumulo.core.metadata.schema.Ample.DataLevel} for which we are considering
* deletes. When operating on DataLevel.USER this will return all user table ids. When operating
* on DataLevel.METADATA this will return the table id for the accumulo.metadata table. When
* operating on DataLevel.ROOT this will return the table id for the accumulo.root table.
```
public interface GarbageCollectionEnvironment {
Stream<Reference> getReferences();
/**
+ * Return a list of all TableIDs in the
+ * {@link org.apache.accumulo.core.metadata.schema.Ample.DataLevel} for which we are considering
+ * deletes. When operating on DataLevel.USER this will return all user table ids. When operating
+ * on DataLevel.METADATA this will return the table id for the accumulo.metadata table. When
+ * operating on DataLevel.ROOT this will return the table id for the accumulo.root table.
*
* @return The table ids
*/
|
codereview_new_java_data_4542
|
public Set<TableId> getCandidateTableIDs() throws InterruptedException {
return Collections.singleton(MetadataTable.ID);
} else if (level == DataLevel.USER) {
Set<TableId> tableIds = new HashSet<>();
- tableIds.remove(MetadataTable.ID);
- tableIds.remove(RootTable.ID);
getTableIDs().forEach((k, v) -> {
if (v == TableState.ONLINE || v == TableState.OFFLINE) {
// Don't return tables that are NEW, DELETING, or in an
// UNKNOWN state.
tableIds.add(k);
}
});
return tableIds;
} else {
throw new IllegalArgumentException("Unexpected Table in GC Env: " + this.level.name());
```suggestion
getTableIDs().forEach((k, v) -> {
if (v == TableState.ONLINE || v == TableState.OFFLINE) {
// Don't return tables that are NEW, DELETING, or in an
// UNKNOWN state.
tableIds.add(k);
}
});
tableIds.remove(MetadataTable.ID);
tableIds.remove(RootTable.ID);
```
public Set<TableId> getCandidateTableIDs() throws InterruptedException {
return Collections.singleton(MetadataTable.ID);
} else if (level == DataLevel.USER) {
Set<TableId> tableIds = new HashSet<>();
getTableIDs().forEach((k, v) -> {
if (v == TableState.ONLINE || v == TableState.OFFLINE) {
// Don't return tables that are NEW, DELETING, or in an
// UNKNOWN state.
tableIds.add(k);
}
});
+ tableIds.remove(MetadataTable.ID);
+ tableIds.remove(RootTable.ID);
return tableIds;
} else {
throw new IllegalArgumentException("Unexpected Table in GC Env: " + this.level.name());
|
codereview_new_java_data_4543
|
protected void ensureAllTablesChecked(Set<TableId> tableIdsBefore, Set<TableId>
tableIdsMustHaveSeen.removeAll(tableIdsSeen);
// If anything is left then we missed a table and may not have removed rfiles references
- // from the candidates list that are acutally still in use, which would
// result in the rfiles being deleted in the next step of the GC process
if (!tableIdsMustHaveSeen.isEmpty()) {
- log.error("TableIDs before: " + tableIdsBefore);
- log.error("TableIDs after : " + tableIdsAfter);
- log.error("TableIDs seen : " + tableIdsSeen);
- log.error("TableIDs that should have been seen but were not: " + tableIdsMustHaveSeen);
// maybe a scan failed?
- throw new IllegalStateException(
- "Saw table IDs in ZK that were not in metadata table: " + tableIdsMustHaveSeen);
}
}
One formatted error message should be enough. There's no need for 4 separate log entries. Having 4 will age out other recently seen errors faster than necessary on the monitor page that shows recent logs, and in a multi-threaded system these entries can be split up by logs from other threads, making them harder to parse and display using automated tooling.
protected void ensureAllTablesChecked(Set<TableId> tableIdsBefore, Set<TableId>
tableIdsMustHaveSeen.removeAll(tableIdsSeen);
// If anything is left then we missed a table and may not have removed rfiles references
+ // from the candidates list that are actually still in use, which would
// result in the rfiles being deleted in the next step of the GC process
if (!tableIdsMustHaveSeen.isEmpty()) {
// maybe a scan failed?
+ throw new IllegalStateException("Saw table IDs in ZK that were not in metadata table: "
+ + tableIdsMustHaveSeen + " TableIDs before GC: " + tableIdsBefore
+ + ", TableIDs during GC: " + tableIdsSeen + ", TableIDs after GC: " + tableIdsAfter);
}
}
|
codereview_new_java_data_4544
|
protected void ensureAllTablesChecked(Set<TableId> tableIdsBefore, Set<TableId>
tableIdsMustHaveSeen.removeAll(tableIdsSeen);
// If anything is left then we missed a table and may not have removed rfiles references
- // from the candidates list that are acutally still in use, which would
// result in the rfiles being deleted in the next step of the GC process
if (!tableIdsMustHaveSeen.isEmpty()) {
- log.error("TableIDs before: " + tableIdsBefore);
- log.error("TableIDs after : " + tableIdsAfter);
- log.error("TableIDs seen : " + tableIdsSeen);
- log.error("TableIDs that should have been seen but were not: " + tableIdsMustHaveSeen);
// maybe a scan failed?
- throw new IllegalStateException(
- "Saw table IDs in ZK that were not in metadata table: " + tableIdsMustHaveSeen);
}
}
I feel like we should either log or throw, but not both. If we throw, then we can rely on the caller to log the message. If we log and throw, then we're probably getting duplicate error messages in the logs that are not obviously the same problem.
protected void ensureAllTablesChecked(Set<TableId> tableIdsBefore, Set<TableId>
tableIdsMustHaveSeen.removeAll(tableIdsSeen);
// If anything is left then we missed a table and may not have removed rfiles references
+ // from the candidates list that are actually still in use, which would
// result in the rfiles being deleted in the next step of the GC process
if (!tableIdsMustHaveSeen.isEmpty()) {
// maybe a scan failed?
+ throw new IllegalStateException("Saw table IDs in ZK that were not in metadata table: "
+ + tableIdsMustHaveSeen + " TableIDs before GC: " + tableIdsBefore
+ + ", TableIDs during GC: " + tableIdsSeen + ", TableIDs after GC: " + tableIdsAfter);
}
}
|
codereview_new_java_data_4545
|
public boolean validateDataVersion(PropStoreKey<?> storeKey, long expectedVersio
throw new IllegalStateException(ex);
} catch (KeeperException.NoNodeException ex) {
propStoreWatcher.signalZkChangeEvent(storeKey);
} catch (KeeperException ex) {
log.debug("exception occurred verifying data version for {}", storeKey);
return false;
you are returning `true` here?
public boolean validateDataVersion(PropStoreKey<?> storeKey, long expectedVersio
throw new IllegalStateException(ex);
} catch (KeeperException.NoNodeException ex) {
propStoreWatcher.signalZkChangeEvent(storeKey);
+ return false;
} catch (KeeperException ex) {
log.debug("exception occurred verifying data version for {}", storeKey);
return false;
|
codereview_new_java_data_4546
|
private void removeCandidatesInUse(GarbageCollectionEnvironment gce,
log.debug("Candidate was still in use: {}", relativePath);
}
}
-
- // close underlying scanner
- refStream.close();
}
private long removeBlipCandidates(GarbageCollectionEnvironment gce,
Does this actually close the underlying scanner? Or just the stream?
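Worth noting: `Stream.close()` only runs handlers registered with `onClose()`; it does not automatically reach back into whatever produced the stream. A minimal sketch of tying a backing resource to a stream's lifetime (the helper is hypothetical):
```java
import java.util.stream.Stream;

class StreamCloseSketch {
  // Hypothetical helper: register the AutoCloseable with the stream so that
  // stream.close() also closes the underlying resource.
  static <T> Stream<T> withResource(Stream<T> stream, AutoCloseable resource) {
    return stream.onClose(() -> {
      try {
        resource.close();
      } catch (Exception e) {
        throw new IllegalStateException(e);
      }
    });
  }
}
```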
private void removeCandidatesInUse(GarbageCollectionEnvironment gce,
log.debug("Candidate was still in use: {}", relativePath);
}
}
}
private long removeBlipCandidates(GarbageCollectionEnvironment gce,
|
codereview_new_java_data_4547
|
import java.util.Collections;
import java.util.stream.Collectors;
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.TableId;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.Path;
public class GcVolumeUtil {
// AGCAV : Accumulo Garbage Collector All Volumes
static final String ALL_VOLUMES_PREFIX = "agcav:/";
- public static AllVolumesDirectory getDeleteTabletOnAllVolumesUri(TableId tableId,
- String dirName) {
- ServerColumnFamily.validateDirCol(dirName);
- String metadataEntry = ALL_VOLUMES_PREFIX + Constants.TABLE_DIR + Path.SEPARATOR + tableId
- + Path.SEPARATOR + dirName;
- return new AllVolumesDirectory(tableId, metadataEntry);
- }
-
public static Collection<Path> expandAllVolumesUri(VolumeManager fs, Path path) {
if (path.toString().startsWith(ALL_VOLUMES_PREFIX)) {
String relPath = path.toString().substring(ALL_VOLUMES_PREFIX.length());
Would it make sense to move this method to the new AllVolumesDirectory class? I am not sure if that makes sense because I don't know enough about the surrounding context.
import java.util.Collections;
import java.util.stream.Collectors;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.Path;
public class GcVolumeUtil {
// AGCAV : Accumulo Garbage Collector All Volumes
static final String ALL_VOLUMES_PREFIX = "agcav:/";
public static Collection<Path> expandAllVolumesUri(VolumeManager fs, Path path) {
if (path.toString().startsWith(ALL_VOLUMES_PREFIX)) {
String relPath = path.toString().substring(ALL_VOLUMES_PREFIX.length());
|
codereview_new_java_data_4590
|
static boolean extractHoMM2AssetsFromZip( final File externalFilesDir, final Inp
// It is allowed to extract only files located in these subdirectories
final Set<String> allowedSubdirNames = new HashSet<>();
allowedSubdirNames.add( "anim" );
allowedSubdirNames.add( "data" );
allowedSubdirNames.add( "maps" );
allowedSubdirNames.add( "music" );
Video files can be stored in **anim**, **anim2** or **heroes2/anim** directories. Should we include the rest?
static boolean extractHoMM2AssetsFromZip( final File externalFilesDir, final Inp
// It is allowed to extract only files located in these subdirectories
final Set<String> allowedSubdirNames = new HashSet<>();
allowedSubdirNames.add( "anim" );
+ // ANIM2 directory is used in the Russian HoMM2 localization made by Buka
+ allowedSubdirNames.add( "anim2" );
allowedSubdirNames.add( "data" );
allowedSubdirNames.add( "maps" );
allowedSubdirNames.add( "music" );
|
codereview_new_java_data_4707
|
public LiteralType getLiteralType() {
return LiteralType.DOUBLE;
} else if (node instanceof LiteralExpression.LongVal) {
return LiteralType.LONG;
- /* TODO: the parser must distinguish decimal vs. double
} else if (node instanceof LiteralExpression.DecimalVal) {
return LiteralType.DECIMAL;
*/
```suggestion
/* TODO(b/239648780): the parser must distinguish decimal vs. double
```
public LiteralType getLiteralType() {
return LiteralType.DOUBLE;
} else if (node instanceof LiteralExpression.LongVal) {
return LiteralType.LONG;
+ /* TODO(b/239648780): the parser must distinguish decimal vs. double
} else if (node instanceof LiteralExpression.DecimalVal) {
return LiteralType.DECIMAL;
*/
|
codereview_new_java_data_4708
|
public class ClassWithTypeAnnotationsInside {
@A OuterG<@A @B ? extends @B String, @A List<@A @B Object>> complicatedField;
- @Target({ ElementType.TYPE_USE, ElementType.TYPE, ElementType.TYPE_PARAMETER })
public @interface A {
int value() default 1;
}
- @Target({ ElementType.TYPE_USE, ElementType.TYPE, ElementType.TYPE_PARAMETER })
public @interface B { }
I don't think this needs to support `ElementType.TYPE`
public class ClassWithTypeAnnotationsInside {
@A OuterG<@A @B ? extends @B String, @A List<@A @B Object>> complicatedField;
+ @Target({ ElementType.TYPE_USE, ElementType.TYPE_PARAMETER })
public @interface A {
int value() default 1;
}
+ @Target({ ElementType.TYPE_USE, ElementType.TYPE_PARAMETER })
public @interface B { }
|
codereview_new_java_data_4709
|
public class ClassWithTypeAnnotationsInside {
@A OuterG<@A @B ? extends @B String, @A List<@A @B Object>> complicatedField;
- @Target({ ElementType.TYPE_USE, ElementType.TYPE, ElementType.TYPE_PARAMETER })
public @interface A {
int value() default 1;
}
- @Target({ ElementType.TYPE_USE, ElementType.TYPE, ElementType.TYPE_PARAMETER })
public @interface B { }
neither does this
public class ClassWithTypeAnnotationsInside {
@A OuterG<@A @B ? extends @B String, @A List<@A @B Object>> complicatedField;
+ @Target({ ElementType.TYPE_USE, ElementType.TYPE_PARAMETER })
public @interface A {
int value() default 1;
}
+ @Target({ ElementType.TYPE_USE, ElementType.TYPE_PARAMETER })
public @interface B { }
|
codereview_new_java_data_4710
|
default boolean annotationAppliesTo(ElementType elementType) {
}
SymAnnot target = getDeclaredAnnotation(Target.class);
if (target == null) {
- return false;
}
return target.attributeContains("value", elementType).isTrue();
}
This is not the right behavior… as per [the docs](https://docs.oracle.com/javase/8/docs/api/java/lang/annotation/Target.html):
> If an `@Target` meta-annotation is not present on an annotation type T , then an annotation of type T may be written as a modifier for any declaration except a type parameter declaration.
So, this should probably become:
```suggestion
SymAnnot target = getDeclaredAnnotation(Target.class);
if (target == null) {
return elementType != ElementType.TYPE_PARAMETER;
```
default boolean annotationAppliesTo(ElementType elementType) {
}
SymAnnot target = getDeclaredAnnotation(Target.class);
if (target == null) {
+ return elementType != ElementType.TYPE_PARAMETER;
}
return target.attributeContains("value", elementType).isTrue();
}
|
codereview_new_java_data_4711
|
default boolean hasReceiver() {
return false;
}
if (this instanceof JConstructorSymbol) {
- return getEnclosingClass().getEnclosingClass() != null
- && !getEnclosingClass().isStatic();
}
return true;
}
swap these
default boolean hasReceiver() {
return false;
}
if (this instanceof JConstructorSymbol) {
+ return !getEnclosingClass().isStatic()
+ && getEnclosingClass().getEnclosingClass() != null;
}
return true;
}
|
codereview_new_java_data_4712
|
public Stream<JMethodSig> streamDeclaredMethods(Predicate<? super JMethodSymbol>
}
- public int getModifiers() {
- return symbol.getModifiers();
- }
-
- @Override
- public @NonNull JClassSymbol getSymbol() {
return symbol;
}
@Override
public final boolean isTop() {
- return this == ts.OBJECT; // NOPMD CompareObjectsWithEquals
}
@Override
```suggestion
return this.getSymbol() == ts.OBJECT.getSymbol(); // NOPMD CompareObjectsWithEquals
```
public Stream<JMethodSig> streamDeclaredMethods(Predicate<? super JMethodSymbol>
}
+ public final @NonNull JClassSymbol getSymbol() {
return symbol;
}
@Override
public final boolean isTop() {
+ return this.getSymbol().equals(ts.OBJECT.getSymbol()); // NOPMD CompareObjectsWithEquals
}
@Override
|
codereview_new_java_data_4713
|
protected AbstractAstExecSymbol(T node, AstSymFactory factory, JClassSymbol owne
);
NodeStream<ASTAnnotation> annotStream = node.getDeclaredAnnotations();
- if (annotStream.isEmpty()) {
- declaredAnnotations = Collections.emptyList();
- } else {
- final List<SymAnnot> annotations = new ArrayList<>();
- annotStream.forEach(n -> annotations.add(new AstSymbolicAnnot(n)));
- declaredAnnotations = Collections.unmodifiableList(annotations);
- }
}
@Override
```suggestion
declaredAnnotations = Collections.unmodifiableList(annotStream.toList(ASTSymbolicAnnot::new));
```
protected AbstractAstExecSymbol(T node, AstSymFactory factory, JClassSymbol owne
);
NodeStream<ASTAnnotation> annotStream = node.getDeclaredAnnotations();
+ declaredAnnotations = Collections.unmodifiableList(annotStream.toList(ASTSymbolicAnnot::new));
}
@Override
|
codereview_new_java_data_4714
|
super(node, factory);
NodeStream<ASTAnnotation> annotStream = node.getDeclaredAnnotations();
- if (annotStream.isEmpty()) {
- declaredAnnotations = Collections.emptyList();
- } else {
- final List<SymAnnot> annotations = new ArrayList<>();
- annotStream.forEach(n -> annotations.add(new AstSymbolicAnnot(n)));
- declaredAnnotations = Collections.unmodifiableList(annotations);
- }
}
@Override
```suggestion
declaredAnnotations = Collections.unmodifiableList(annotStream.toList(ASTSymbolicAnnot::new));
```
super(node, factory);
NodeStream<ASTAnnotation> annotStream = node.getDeclaredAnnotations();
+ declaredAnnotations = Collections.unmodifiableList(annotStream.toList(ASTSymbolicAnnot::new));
}
@Override
|
codereview_new_java_data_4715
|
}
final NodeStream<ASTAnnotation> annotStream = node.getDeclaredAnnotations();
- if (annotStream.nonEmpty()) {
- myAnnotations = new ArrayList<>();
- annotStream.forEach(anode -> myAnnotations.add(new AstSymbolicAnnot(anode)));
- } else {
- myAnnotations = Collections.emptyList();
- }
if (!recordComponents.isEmpty()) {
// then the recordsComponents contains all record components
```suggestion
this.declaredAnnotations = Collections.unmodifiableList(annotStream.toList(ASTSymbolicAnnot::new));
```
The list is not touched anywhere else, so I don't think you need `myAnnotations`.
}
final NodeStream<ASTAnnotation> annotStream = node.getDeclaredAnnotations();
+ this.declaredAnnotations = Collections.unmodifiableList(annotStream.toList(ASTSymbolicAnnot::new));
if (!recordComponents.isEmpty()) {
// then the recordsComponents contains all record components
|
codereview_new_java_data_4716
|
import java.util.List;
-import org.junit.Test;
public class ForceLanguageCliTest extends BaseCliTest {
```suggestion
import org.junit.jupiter.api.Test;
```
This test should be a JUnit 5 test, because `BaseCliTest` already uses JUnit 5 annotations.
import java.util.List;
+import org.junit.jupiter.api.Test;
+
public class ForceLanguageCliTest extends BaseCliTest {
|
codereview_new_java_data_4717
|
protected List<String> cliStandardArgs() {
return listOf(
"check",
"--no-cache",
- "--no-progress",
"-f", "text",
- "-R", PmdCliTest.RSET_WITH_VIOLATION
);
}
Might not be needed anymore (with the changes from #4233).
protected List<String> cliStandardArgs() {
return listOf(
"check",
"--no-cache",
"-f", "text",
+ "-R", PmdCliTest.RULESET_WITH_VIOLATION
);
}
|
codereview_new_java_data_4718
|
/**
* This file is using ISO-8859-1 (Latin-1) encoding.
*
- * ?
*/
public class FileWith_ISO8859-1_Encoding {
I think this went wrong - the IDE already replaced the important character with an ordinary `?` (U+003F), which means this file is now also correctly UTF-8 encoded and no longer contains the problem.
Originally, instead of the question mark, the byte in hex was `0xE4` - which is U+00E4, a small a-umlaut (ä). Encoded in ISO-8859-1 this is just `0xE4`, but in UTF-8 it becomes `0xC3 0xA4`.
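A small, self-contained demonstration of the byte-level difference described above:
```java
import java.nio.charset.StandardCharsets;

class EncodingDemo {
  public static void main(String[] args) {
    String aUmlaut = "\u00E4"; // ä
    // ISO-8859-1 encodes U+00E4 as the single byte 0xE4
    byte[] latin1 = aUmlaut.getBytes(StandardCharsets.ISO_8859_1);
    // UTF-8 encodes the same character as the two bytes 0xC3 0xA4
    byte[] utf8 = aUmlaut.getBytes(StandardCharsets.UTF_8);
    System.out.printf("latin1: %02X%n", latin1[0] & 0xFF);              // E4
    System.out.printf("utf8:   %02X %02X%n", utf8[0] & 0xFF, utf8[1] & 0xFF); // C3 A4
  }
}
```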
/**
* This file is using ISO-8859-1 (Latin-1) encoding.
*
+ * � (this is an a-umlaut U+00E4)
*/
public class FileWith_ISO8859-1_Encoding {
|
codereview_new_java_data_4719
|
private void checkImports(TypeNode node, Object data) {
String importStr = firstMatch.getImportedName() + (firstMatch.isImportOnDemand() ? ".*" : "");
String type = firstMatch.isStatic() ? "static " : "";
- addViolation(data, node, new Object[]{node.getImage(), importStr, type});
}
}
}
Nit: This can probably be written with `asCtx(data).addViolation(...` as well.
private void checkImports(TypeNode node, Object data) {
String importStr = firstMatch.getImportedName() + (firstMatch.isImportOnDemand() ? ".*" : "");
String type = firstMatch.isStatic() ? "static " : "";
+ asCtx(data).addViolation(node, node.getImage(), importStr, type);
}
}
}
|
codereview_new_java_data_4720
|
public class ApexUnitTestClassShouldHaveAssertsRule extends AbstractApexUnitTest
ASSERT_METHODS.add("assert.isfalse");
ASSERT_METHODS.add("assert.isinstanceoftype");
ASSERT_METHODS.add("assert.isnotinstanceoftype");
- ASSERT_METHODS.add("assert.isnnull");
ASSERT_METHODS.add("assert.isnotnull");
ASSERT_METHODS.add("assert.istrue");
// Fully-qualified variants...rare but still valid/possible
```suggestion
ASSERT_METHODS.add("assert.isnull");
```
public class ApexUnitTestClassShouldHaveAssertsRule extends AbstractApexUnitTest
ASSERT_METHODS.add("assert.isfalse");
ASSERT_METHODS.add("assert.isinstanceoftype");
ASSERT_METHODS.add("assert.isnotinstanceoftype");
+ ASSERT_METHODS.add("assert.isnull");
ASSERT_METHODS.add("assert.isnotnull");
ASSERT_METHODS.add("assert.istrue");
// Fully-qualified variants...rare but still valid/possible
|
codereview_new_java_data_4721
|
public final class ApexMultifileAnalysis {
// test only
static final Logger LOG = LoggerFactory.getLogger(ApexMultifileAnalysis.class);
- /**
- * Instances of the apexlink index and data structures ({@link Org})
- * are stored statically for now. TODO make that language-wide (#2518).
- */
-
// An arbitrary large number of errors to report
private static final int MAX_ERRORS_PER_FILE = 100;
We should also remove this javadoc comment belonging to the removed `INSTANCE_MAP`
public final class ApexMultifileAnalysis {
// test only
static final Logger LOG = LoggerFactory.getLogger(ApexMultifileAnalysis.class);
// An arbitrary large number of errors to report
private static final int MAX_ERRORS_PER_FILE = 100;
|
codereview_new_java_data_4722
|
public AstInfo(ParserTask task, T rootNode) {
- this(task, rootNode, Collections.emptyMap());
- }
-
- public AstInfo(ParserTask task, T rootNode, Map<Integer, String> suppressionComments) {
- this(task.getTextDocument(), rootNode, task.getLpRegistry(), suppressionComments);
}
private AstInfo(TextDocument textDocument,
Make this ctor private
public AstInfo(ParserTask task, T rootNode) {
+ this(task.getTextDocument(), rootNode, task.getLpRegistry(), Collections.emptyMap());
}
private AstInfo(TextDocument textDocument,
|
codereview_new_java_data_4723
|
package net.sourceforge.pmd.cli.internal;
public enum ExecutionResult {
OK(0),
ERROR(1),
VIOLATIONS_FOUND(4);
- private final int exitStatusCode;
- ExecutionResult(int exitStatusCode) {
- this.exitStatusCode = exitStatusCode;
}
-
- public int getExitStatusCode() {
- return exitStatusCode;
}
}
Not sure if this was duplicated on purpose, but there is a similar enum in the class PMD: https://docs.pmd-code.org/apidocs/pmd-core/6.47.0/net/sourceforge/pmd/PMD.StatusCode.html
package net.sourceforge.pmd.cli.internal;
+// TODO : Unify with PMD.StatusCode / CPD.StatusCode
public enum ExecutionResult {
OK(0),
ERROR(1),
VIOLATIONS_FOUND(4);
+ private final int exitCode;
+ ExecutionResult(int exitCode) {
+ this.exitCode = exitCode;
}
+
+ public int getExitCode() {
+ return exitCode;
}
}
|
codereview_new_java_data_4724
|
public class PmdLanguageTypeSupport implements ITypeConverter<Language>, Iterabl
@Override
public Language convert(final String value) throws Exception {
return LanguageRegistry.getLanguages().stream()
- .filter(l -> normalizeName(l).equals(value)).findFirst()
.orElseThrow(() -> new TypeConversionException("Unknown language: " + value));
}
@Override
public Iterator<String> iterator() {
- return LanguageRegistry.getLanguages().stream().map(PmdLanguageTypeSupport::normalizeName).iterator();
- }
-
- public static String normalizeName(final Language lang) {
- return lang.getTerseName().replace(' ', '-');
}
}
https://github.com/pmd/pmd/pull/4060/files#diff-13b0c252d6bcd1baf3435099a4d91c5ad9fc708390836e3864aa56e75df7a706R61 strengthens the contract of getTerseName (replaced with getId) to be a Java identifier. This makes the normalization redundant. All existing languages currently respect this, so you could just use getTerseName.
public class PmdLanguageTypeSupport implements ITypeConverter<Language>, Iterabl
@Override
public Language convert(final String value) throws Exception {
return LanguageRegistry.getLanguages().stream()
+ .filter(l -> l.getTerseName().equals(value)).findFirst()
.orElseThrow(() -> new TypeConversionException("Unknown language: " + value));
}
@Override
public Iterator<String> iterator() {
+ return LanguageRegistry.getLanguages().stream().map(Language::getTerseName).iterator();
}
}
|
codereview_new_java_data_4725
|
public void setReportFile(final Path reportFile) {
this.reportFile = reportFile;
}
- @Option(names = "--use-version", defaultValue = "java-19",
description = "The language version PMD should use when parsing source code.%nValid values: ${COMPLETION-CANDIDATES}",
completionCandidates = PmdLanguageVersionTypeSupport.class, converter = PmdLanguageVersionTypeSupport.class)
public void setLanguageVersion(final List<LanguageVersion> languageVersion) {
I don't think this option should have a default value. In pmd 7, #4049 removed the concept of a "default language", so there is no reason to single out java. The omission of this CLI option also means that all languages use their default version, not just java
public void setReportFile(final Path reportFile) {
this.reportFile = reportFile;
}
+ @Option(names = "--use-version",
description = "The language version PMD should use when parsing source code.%nValid values: ${COMPLETION-CANDIDATES}",
completionCandidates = PmdLanguageVersionTypeSupport.class, converter = PmdLanguageVersionTypeSupport.class)
public void setLanguageVersion(final List<LanguageVersion> languageVersion) {
|
codereview_new_java_data_4726
|
public TestDescriptor[] extractTestsFromXml(Rule rule) {
/**
* Extract a set of tests from an XML file. The file should be
* ./xml/RuleName.xml relative to the test class. The format is defined in
- * test-data.xsd.
*/
RuleTestCollection parseTestCollection(Rule rule) {
String testsFileName = getCleanRuleName(rule);
```suggestion
* rule-tests_1_0_0.xsd.
```
public TestDescriptor[] extractTestsFromXml(Rule rule) {
/**
* Extract a set of tests from an XML file. The file should be
* ./xml/RuleName.xml relative to the test class. The format is defined in
+ * rule-tests_1_0_0.xsd in pmd-test-schema.
*/
RuleTestCollection parseTestCollection(Rule rule) {
String testsFileName = getCleanRuleName(rule);
|
codereview_new_java_data_4727
|
* A pattern (for pattern matching constructs like {@link ASTInstanceOfExpression InstanceOfExpression}
* or within a {@link ASTSwitchLabel}). This is a JDK 16 feature.
*
- * <p>This interface will be implemented by all forms of patterns.
*
* <pre class="grammar">
*
```suggestion
* <p>This interface is implemented by all forms of patterns.
```
* A pattern (for pattern matching constructs like {@link ASTInstanceOfExpression InstanceOfExpression}
* or within a {@link ASTSwitchLabel}). This is a JDK 16 feature.
*
+ * <p>This interface is implemented by all forms of patterns.
*
* <pre class="grammar">
*
|
codereview_new_java_data_4728
|
import net.sourceforge.pmd.annotation.Experimental;
/**
- * A record pattern (JDK19). This can be found on
- * the right-hand side of an {@link ASTInstanceOfExpression InstanceOfExpression}.
*
* <pre class="grammar">
*
- * RecordPattern ::= {@linkplain ASTReferenceType ReferenceType} {@linkplain ASTRecordStructurePattern RecordStructurePattern} [ {@linkplain ASTVariableDeclaratorId} VariableDeclaratorId ]
*
* </pre>
*
```suggestion
* RecordPattern ::= {@linkplain ASTReferenceType ReferenceType} {@linkplain ASTRecordStructurePattern RecordStructurePattern} [ {@linkplain ASTVariableDeclaratorId VariableDeclaratorId} ]
```
import net.sourceforge.pmd.annotation.Experimental;
/**
+ * A record pattern (JDK19).
*
* <pre class="grammar">
*
+ * RecordPattern ::= {@linkplain ASTReferenceType ReferenceType} {@linkplain ASTComponentPatternList ComponentPatternList} [ {@linkplain ASTVariableDeclaratorId VariableDeclaratorId} ]
*
* </pre>
*
|
codereview_new_java_data_4729
|
import net.sourceforge.pmd.annotation.Experimental;
/**
- * A record pattern (JDK19). This can be found on
- * the right-hand side of an {@link ASTInstanceOfExpression InstanceOfExpression}.
*
* <pre class="grammar">
*
- * RecordPattern ::= {@linkplain ASTReferenceType ReferenceType} {@linkplain ASTRecordStructurePattern RecordStructurePattern} [ {@linkplain ASTVariableDeclaratorId} VariableDeclaratorId ]
*
* </pre>
*
```suggestion
* A record pattern (JDK19).
```
This sentence used to be relevant when patterns were very new, but now there are more and more places where patterns can be found
import net.sourceforge.pmd.annotation.Experimental;
/**
+ * A record pattern (JDK19).
*
* <pre class="grammar">
*
+ * RecordPattern ::= {@linkplain ASTReferenceType ReferenceType} {@linkplain ASTComponentPatternList ComponentPatternList} [ {@linkplain ASTVariableDeclaratorId VariableDeclaratorId} ]
*
* </pre>
*
|
codereview_new_java_data_4730
|
public void testRelativeReportFileLongOption() throws IOException {
@Test
public void debugLogging() {
- Path reportFile = tempRoot().resolve("out/reportFile.txt");
- runPmdSuccessfully("--debug", "--no-cache", "--dir", srcDir, "--rulesets", DUMMY_RULESET, "-r", reportFile);
assertThat(errStreamCaptor.getLog(), containsString("[main] INFO net.sourceforge.pmd.PMD - Log level is at TRACE"));
}
@Test
public void defaultLogging() {
- Path reportFile = tempRoot().resolve("out/reportFile.txt");
- runPmdSuccessfully("--no-cache", "--dir", srcDir, "--rulesets", DUMMY_RULESET, "-r", reportFile);
assertThat(errStreamCaptor.getLog(), containsString("[main] INFO net.sourceforge.pmd.PMD - Log level is at INFO"));
}
@Test
public void testDeprecatedRulesetSyntaxOnCommandLine() {
- Path reportFile = tempRoot().resolve("out/reportFile.txt");
- runPmd(StatusCode.VIOLATIONS_FOUND, "--no-cache", "--dir", srcDir, "--rulesets", "dummy-basic", "-r", reportFile);
MatcherAssert.assertThat(errStreamCaptor.getLog(), containsString("Ruleset reference 'dummy-basic' uses a deprecated form, use 'rulesets/dummy/basic.xml' instead"));
}
I've added a report output file as otherwise we report to stdout.
And we close the report files afterwards - which is why we close stdout.
We have an outStreamCaptor active; however, the close is delegated to the original stdout stream.
Closing stdout in a unit test is not good for IntelliJ IDEA, as IntelliJ IDEA uses this channel to communicate test progress etc...
public void testRelativeReportFileLongOption() throws IOException {
@Test
public void debugLogging() {
+ runPmdSuccessfully("--debug", "--no-cache", "--dir", srcDir, "--rulesets", DUMMY_RULESET);
assertThat(errStreamCaptor.getLog(), containsString("[main] INFO net.sourceforge.pmd.PMD - Log level is at TRACE"));
}
@Test
public void defaultLogging() {
+ runPmdSuccessfully("--no-cache", "--dir", srcDir, "--rulesets", DUMMY_RULESET);
assertThat(errStreamCaptor.getLog(), containsString("[main] INFO net.sourceforge.pmd.PMD - Log level is at INFO"));
}
@Test
public void testDeprecatedRulesetSyntaxOnCommandLine() {
+ runPmd(StatusCode.VIOLATIONS_FOUND, "--no-cache", "--dir", srcDir, "--rulesets", "dummy-basic");
MatcherAssert.assertThat(errStreamCaptor.getLog(), containsString("Ruleset reference 'dummy-basic' uses a deprecated form, use 'rulesets/dummy/basic.xml' instead"));
}
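As an aside, the hazard described above can also be avoided by shielding stdout rather than avoiding it, for example with commons-io's `CloseShieldOutputStream`; this is a hedged sketch of that alternative, not what the PR does:
```java
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.output.CloseShieldOutputStream;

// Hedged sketch: closing this writer never closes the real System.out,
// so IDE test runners keep their progress channel intact.
Writer reportWriter = new OutputStreamWriter(
        CloseShieldOutputStream.wrap(System.out), StandardCharsets.UTF_8);
```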
|
codereview_new_java_data_4731
|
import net.sourceforge.pmd.cpd.Match;
public interface CPDRenderer {
void render(Iterator<Match> matches, Writer writer) throws IOException;
}
Adding this additional method to the interface will break any custom renderers that implement CPDRenderer...
Should we maybe introduce a new interface, like "CPDRenderer2"?
With this change, the new method seems to be the one that PMD calls almost exclusively, which means we should deprecate the other method - as it is not used directly.
But I think I like the idea of an additional interface better - this new interface can then be implemented additionally by XMLRenderer, and all other renderers can stay unchanged.
import net.sourceforge.pmd.cpd.Match;
+/**
+ * @deprecated Use {@link CPDReportRenderer}
+ */
+@Deprecated
public interface CPDRenderer {
void render(Iterator<Match> matches, Writer writer) throws IOException;
}
|
codereview_new_java_data_4732
|
import net.sourceforge.pmd.cpd.Match;
public interface CPDRenderer {
void render(Iterator<Match> matches, Writer writer) throws IOException;
}
```suggestion
void render(Map<String, Integer> numberOfTokensPerFile, Iterator<Match> matches, Writer writer) throws IOException;
```
import net.sourceforge.pmd.cpd.Match;
+/**
+ * @deprecated Use {@link CPDReportRenderer}
+ */
+@Deprecated
public interface CPDRenderer {
void render(Iterator<Match> matches, Writer writer) throws IOException;
}
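For reference, a minimal sketch of what the replacement interface named in the deprecation could look like, assuming (as suggested above) that the per-file token counts travel with a report object instead of widening the old signature; the exact shape of `CPDReport` is an assumption here, not taken from this diff:
```java
import java.io.IOException;
import java.io.Writer;

// Hedged sketch: the renderer receives a report that bundles the matches
// with the per-file token counts, so old CPDRenderer implementations can
// stay unchanged while new ones implement this interface instead.
public interface CPDReportRenderer {
    void render(CPDReport report, Writer writer) throws IOException;
}
```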
|
codereview_new_java_data_4733
|
-/*
- * BSD-style license; for more info see http://pmd.sourceforge.net/license.html
- */
-
-/**
- * The class {@link net.sourceforge.pmd.lang.gherkin.antlr4.GherkinLexer} will be moved to
- * package {@code net.sourceforge.pmd.lang.gherkin.ast} with PMD 7.
- *
- * <p>All other classes will be removed.
- */
-package net.sourceforge.pmd.lang.gherkin.antlr4;
Since this is a new module, we should probably create it in the correct package right away. We didn't do this for the other modules, because they are not new and moving classes potentially breaks clients. But for Gherkin there are no clients that could break yet.
|
codereview_new_java_data_4734
|
import java.util.Properties;
import org.junit.Test;
-import org.junit.rules.ExpectedException;
import net.sourceforge.pmd.cpd.test.CpdTextComparisonTest;
public class GherkinTokenizerTest extends CpdTextComparisonTest {
-
- @org.junit.Rule
- public ExpectedException ex = ExpectedException.none();
-
public GherkinTokenizerTest() {
super(".feature");
}
This is not needed and can be removed
import java.util.Properties;
import org.junit.Test;
import net.sourceforge.pmd.cpd.test.CpdTextComparisonTest;
public class GherkinTokenizerTest extends CpdTextComparisonTest {
public GherkinTokenizerTest() {
super(".feature");
}
|
codereview_new_java_data_4888
|
public class DataFlowClientAutoConfiguration {
@Autowired
private @Nullable OAuth2ClientProperties oauth2ClientProperties;
- public DataFlowClientAutoConfiguration() {
- }
-
- public DataFlowClientAutoConfiguration(RestTemplate restTemplate) {
this.restTemplate = restTemplate;
}
I am not a fan of mixing constructor and field injection. Wdyt about moving it all to CI such as:
```java
private DataFlowClientProperties properties;
private RestTemplate restTemplate;
private ClientRegistrationRepository clientRegistrations;
private OAuth2AccessTokenResponseClient<OAuth2ClientCredentialsGrantRequest> clientCredentialsTokenResponseClient;
private OAuth2ClientProperties oauth2ClientProperties;
public DataFlowClientAutoConfiguration(
DataFlowClientProperties properties,
@Nullable RestTemplate restTemplate,
@Nullable ClientRegistrationRepository clientRegistrations,
@Nullable OAuth2AccessTokenResponseClient<OAuth2ClientCredentialsGrantRequest> clientCredentialsTokenResponseClient,
@Nullable OAuth2ClientProperties oauth2ClientProperties) {
this.properties = properties;
this.restTemplate = restTemplate;
this.clientRegistrations = clientRegistrations;
this.clientCredentialsTokenResponseClient = clientCredentialsTokenResponseClient;
this.oauth2ClientProperties = oauth2ClientProperties;
}
```
public class DataFlowClientAutoConfiguration {
@Autowired
private @Nullable OAuth2ClientProperties oauth2ClientProperties;
+ public DataFlowClientAutoConfiguration(@Nullable RestTemplate restTemplate) {
this.restTemplate = restTemplate;
}
|
codereview_new_java_data_4889
|
public void testComposedTaskAppArguments() {
@Test
public void testAssignmentOfOauth2ClientCredentialsClientAuthenticationMethod(){
this.contextRunner
- .withInitializer(context -> {
- Map<String, Object> map = new HashMap<>();
- map.put("oauth2ClientCredentialsClientAuthenticationMethod", "POST");
- context.getEnvironment().getPropertySources().addLast(new SystemEnvironmentPropertySource(
- StandardEnvironment.SYSTEM_ENVIRONMENT_PROPERTY_SOURCE_NAME, map));
- })
.withUserConfiguration(Config1.class).run((context) -> {
ComposedTaskProperties properties = context.getBean(ComposedTaskProperties.class);
assertThat(properties.getOauth2ClientCredentialsClientAuthenticationMethod())
I am pretty sure you copied the initializer/properties part from the test above but a couple of points...
* I think we can instead simply use `ApplicationContextRunner.withSystemProperties`
However, if we did continue using the initializer approach:
* I don't think we would need to use `SystemEnvironmentPropertySource` and instead could use a simple `MapPropertySource`
* I am not sure why the `addLast` method is used rather than `addFirst` as we want these props to take precedence (unless the `SystemEnvironmentPropertySource` has a higher built-in order/priority).
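A sketch of the initializer alternative described in the comment above, using a plain `MapPropertySource` added first so the test value takes precedence (illustrative only; the revised test below uses `withSystemProperties` instead). Assumes the usual `java.util` and `org.springframework.core.env.MapPropertySource` imports:
```java
// Hedged sketch: MapPropertySource + addFirst instead of a
// SystemEnvironmentPropertySource appended last.
this.contextRunner
        .withInitializer(context -> {
            Map<String, Object> map = new HashMap<>();
            map.put("oauth2ClientCredentialsClientAuthenticationMethod", "POST");
            context.getEnvironment().getPropertySources()
                    .addFirst(new MapPropertySource("testProps", map));
        })
        .withUserConfiguration(Config1.class)
        .run(context -> { /* assertions unchanged */ });
```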
public void testComposedTaskAppArguments() {
@Test
public void testAssignmentOfOauth2ClientCredentialsClientAuthenticationMethod(){
this.contextRunner
+ .withSystemProperties("OAUTH2_CLIENT_CREDENTIALS_CLIENT_AUTHENTICATION_METHOD=POST")
.withUserConfiguration(Config1.class).run((context) -> {
ComposedTaskProperties properties = context.getBean(ComposedTaskProperties.class);
assertThat(properties.getOauth2ClientCredentialsClientAuthenticationMethod())
|
codereview_new_java_data_5150
|
package net.fabricmc.filament.task.base;
-import javax.inject.Inject;
-
import org.gradle.api.file.RegularFileProperty;
import org.gradle.api.tasks.OutputFile;
public abstract class FileOutputTask extends FilamentTask {
@OutputFile
public abstract RegularFileProperty getOutputFile();
-
- @Inject
- public FileOutputTask() {
- getOutputFile().finalizeValueOnRead();
- }
}
Are these really needed for tasks? I can't imagine anything mutating them *during* task execution
package net.fabricmc.filament.task.base;
import org.gradle.api.file.RegularFileProperty;
import org.gradle.api.tasks.OutputFile;
public abstract class FileOutputTask extends FilamentTask {
@OutputFile
public abstract RegularFileProperty getOutputFile();
}
|
codereview_new_java_data_5151
|
public TTransport request(FContext context, byte[] payload) throws TTransportExc
byte[] response = makeRequest(context, payload);
TTransport responseTransport = null;
- if(response != null) {
TConfiguration responseConfig =
TConfigurationBuilder.custom().setMaxMessageSize(responseSizeLimit).build();
responseTransport = new TMemoryInputTransport(responseConfig, response);
```suggestion
if (response != null) {
```
public TTransport request(FContext context, byte[] payload) throws TTransportExc
byte[] response = makeRequest(context, payload);
TTransport responseTransport = null;
+ if (response != null) {
TConfiguration responseConfig =
TConfigurationBuilder.custom().setMaxMessageSize(responseSizeLimit).build();
responseTransport = new TMemoryInputTransport(responseConfig, response);
|
codereview_new_java_data_5152
|
import com.workiva.frugal.transport.monitor.FTransportMonitor;
import com.workiva.frugal.transport.monitor.MonitorRunner;
-import org.apache.thrift.TConfiguration;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
Is this import still needed?
import com.workiva.frugal.transport.monitor.FTransportMonitor;
import com.workiva.frugal.transport.monitor.MonitorRunner;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import org.slf4j.Logger;
|
codereview_new_java_data_6134
|
@AttributeDefinition(name = "Modbus-ID", description = "ID of Modbus bridge.")
String modbus_id() default "modbus0";
- @AttributeDefinition(name = "Modbus Unit-ID", description = "The Unit-ID of the Modbus device.")
- int modbusUnitId() default 3;
@AttributeDefinition(name = "Modbus target filter", description = "This is auto-generated by 'Modbus-ID'.")
String Modbus_target() default "(enabled=true)";
We should set a default value of 1 to stay backwards compatible with Modbus/TCP. Maybe add a hint here or in the readme that it has to be set explicitly for Modbus/RTU connections.
@AttributeDefinition(name = "Modbus-ID", description = "ID of Modbus bridge.")
String modbus_id() default "modbus0";
+ @AttributeDefinition(name = "Modbus Unit-ID", description = "The Unit-ID of the Modbus device. If you do not know " +
+ "the ID and the default ID of 1 does not work, try 3.")
+ int modbusUnitId() default 1;
@AttributeDefinition(name = "Modbus target filter", description = "This is auto-generated by 'Modbus-ID'.")
String Modbus_target() default "(enabled=true)";
|
codereview_new_java_data_6135
|
public class KacoBlueplanet extends AbstractSunSpecPvInverter implements SunSpec
// .put(DefaultSunSpecModel.S_160, Priority.LOW) // from 40792
// .put(SunSpecModel.S_64204, Priority.LOW) // from 40842
- // We had a Kaco nx3 for testing, which had a modbus ID of 3. A static modbus ID of 1 worked for TCP (no idea why),
- // but not for RTU. To get this plugin working with the Kaco nx3 on a RTU connection, the modbus ID must be configurable.
- //private static final int UNIT_ID = 1;
private static final int READ_FROM_MODBUS_BLOCK = 1;
@Reference
Once this PR is finished, these comments are not anymore required.
public class KacoBlueplanet extends AbstractSunSpecPvInverter implements SunSpec
// .put(DefaultSunSpecModel.S_160, Priority.LOW) // from 40792
// .put(SunSpecModel.S_64204, Priority.LOW) // from 40842
private static final int READ_FROM_MODBUS_BLOCK = 1;
@Reference
|
codereview_new_java_data_6531
|
boolean shouldRetryOperation(ResourceException ex, int retryCount) {
switch (ex.getCode()) {
case ResourceException.CONFLICT:
- if (!ex.getMessage().contains("already has a pending request in a different state")) {
- retry = true;
- }
break;
case ResourceException.GONE:
We should not base our logic on message content. It is possible that the customer might write their own datastore plugin and may not be aware that they must return something specific in the error message. Instead of returning CONFLICT, we should just return BAD_REQUEST, and the error message can indicate what we already have.
boolean shouldRetryOperation(ResourceException ex, int retryCount) {
switch (ex.getCode()) {
case ResourceException.CONFLICT:
+
+ retry = true;
break;
case ResourceException.GONE:
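A minimal sketch of the datastore-side convention proposed above, so that retry logic never has to inspect message text (the field name and message are illustrative, not from this change):
```java
// Hedged sketch: signal the non-retryable case with a distinct status
// code instead of a specially worded CONFLICT message.
if (hasPendingRequestInDifferentState) {
    throw new ResourceException(ResourceException.BAD_REQUEST,
            "object already has a pending request in a different state");
}
```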
|
codereview_new_java_data_6534
|
package com.yahoo.athenz.zms.purge;
import org.apache.commons.lang3.EnumUtils;
we need to add copyright notice
+/*
+ *
+ * * Copyright The Athenz Authors
+ * *
+ * * Licensed under the Apache License, Version 2.0 (the "License");
+ * * you may not use this file except in compliance with the License.
+ * * You may obtain a copy of the License at
+ * *
+ * * http://www.apache.org/licenses/LICENSE-2.0
+ * *
+ * * Unless required by applicable law or agreed to in writing, software
+ * * distributed under the License is distributed on an "AS IS" BASIS,
+ * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * * See the License for the specific language governing permissions and
+ * * limitations under the License.
+ *
+ */
+
package com.yahoo.athenz.zms.purge;
import org.apache.commons.lang3.EnumUtils;
|
codereview_new_java_data_6535
|
package com.yahoo.athenz.zms;
import com.yahoo.rdl.Timestamp;
we need to add copyright notice
+/*
+ * Copyright The Athenz Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package com.yahoo.athenz.zms;
import com.yahoo.rdl.Timestamp;
|
codereview_new_java_data_6564
|
public static Compressor<ByteBuf> newInstance(String name, DriverContext context
}
}
- @TargetClass(value = Lz4Compressor.class, onlyWith = Lz4Missing.class)
- public static final class DeleteLz4Compressor {}
-
- @TargetClass(value = SnappyCompressor.class)
- public static final class DeleteSnappyCompressor {}
-
public static class Lz4Present implements BooleanSupplier {
@Override
public boolean getAsBoolean() {
The original PR deletes the whole class, not just the annotation. What is the difference?
public static Compressor<ByteBuf> newInstance(String name, DriverContext context
}
}
public static class Lz4Present implements BooleanSupplier {
@Override
public boolean getAsBoolean() {
|
codereview_new_java_data_6565
|
*/
@SdkPublicApi
public interface AwsCredentials extends AwsCredentialsIdentity {
-
- @Override
- String accessKeyId();
-
- @Override
- String secretAccessKey();
}
These methods are already defined in `AwsCredentialsIdentity` so thought of removing them from here, but that caused failures from `japicmp`. LMK if it's possible to override that / desired to remove, or just better to leave them here still?
*/
@SdkPublicApi
public interface AwsCredentials extends AwsCredentialsIdentity {
}
|
codereview_new_java_data_6566
|
import software.amazon.awssdk.utils.builder.SdkBuilder;
/**
- * A request to resolve an Identity.
*
* The Identity may be determined for each request based on properties of the request (e.g. different credentials per bucket
* for S3).
Nit: Maybe we can add a `link` here or add Identity to `see` below
import software.amazon.awssdk.utils.builder.SdkBuilder;
/**
+ * A request to resolve an {@link Identity}.
*
* The Identity may be determined for each request based on properties of the request (e.g. different credentials per bucket
* for S3).
|
codereview_new_java_data_6567
|
public void preprocess(ServiceModel serviceModel) {
return;
}
Map<String, Shape> shapes = serviceModel.getShapes();
- for (String shapeName: customSdkShapes.getShapes().keySet()) {
- customSdkShapes.getShape(shapeName).setSynthetic(true);
- shapes.put(shapeName, customSdkShapes.getShape(shapeName));
- }
serviceModel.setShapes(shapes);
}
Can we use foreach ?
```java
customSdkShapes.getShapes().forEach((shapeName, shape) -> {
shape.setSynthetic(true);
shapes.put(shapeName, shape);
});
```
public void preprocess(ServiceModel serviceModel) {
return;
}
Map<String, Shape> shapes = serviceModel.getShapes();
+ customSdkShapes.getShapes().forEach((shapeName, shape) -> {
+ shape.setSynthetic(true);
+ shapes.put(shapeName, shape);
+ });
serviceModel.setShapes(shapes);
}
|
codereview_new_java_data_6568
|
public static HttpProxyOptions buildProxyOptions(ProxyConfiguration proxyConfigu
return clientProxyOptions;
}
- public static HttpMonitoringOptions revolveHttpMonitoringOptions(ConnectionHealthConfiguration config) {
if (config == null) {
return null;
}
Nit: there's an old misspelling here, `revolve` should be `resolve`. I think.
public static HttpProxyOptions buildProxyOptions(ProxyConfiguration proxyConfigu
return clientProxyOptions;
}
+ public static HttpMonitoringOptions resolveHttpMonitoringOptions(ConnectionHealthConfiguration config) {
if (config == null) {
return null;
}
|
codereview_new_java_data_6569
|
public static Ec2MetadataEndpointProvider instance() {
* Resolve the endpoint to be used for the {@link DefaultEc2MetadataClient} client. Users may manually provide an endpoint
* through the {@code AWS_EC2_METADATA_SERVICE_ENDPOINT} environment variable or the {@code ec2_metadata_service_endpoint}
* key in their aws config file.
- * If an endpoint is specified is this manner, use it. If no value are provide, the defaults to:
* <ol>
* <li>If endpoint mode is set to IPv4: {@code "http://169.254.169.254"}</li>
* <li>If endpoint mode is set to IPv6: {@code "http://[fd00:ec2::254]"}</li>
Singular vs plural - value are
public static Ec2MetadataEndpointProvider instance() {
* Resolve the endpoint to be used for the {@link DefaultEc2MetadataClient} client. Users may manually provide an endpoint
* through the {@code AWS_EC2_METADATA_SERVICE_ENDPOINT} environment variable or the {@code ec2_metadata_service_endpoint}
* key in their aws config file.
+ * If an endpoint is specified in this manner, use it. If no values are provided, it defaults to:
* <ol>
* <li>If endpoint mode is set to IPv4: {@code "http://169.254.169.254"}</li>
* <li>If endpoint mode is set to IPv6: {@code "http://[fd00:ec2::254]"}</li>
|
codereview_new_java_data_6570
|
import software.amazon.awssdk.utils.Validate;
/**
- * The class is used for response handling and parsing the metadata fetched by the get call in the {@link Ec2MetadataClient}
- * interface.
- * The class provides convenience methods to the users to parse the metadata as a String, List and Document (json).
*/
@SdkPublicApi
public final class Ec2MetadataResponse {
What do we mean by "Document (json)"?
import software.amazon.awssdk.utils.Validate;
/**
+ * This class is used for response handling and parsing the metadata fetched by the get call in the {@link Ec2MetadataClient}
+ * interface. It provides convenience methods to parse the metadata as a String or a List, and also provides
+ * ways to parse the metadata as a Document type when it is in JSON format.
*/
@SdkPublicApi
public final class Ec2MetadataResponse {
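A hedged usage sketch of those convenience accessors; the method names follow the SDK's imds API as I understand it, so treat the exact signatures as assumptions:
```java
// Hedged sketch: parse one metadata response three ways.
Ec2MetadataResponse response = client.get("/latest/meta-data/ami-id");
String raw = response.asString();        // entire body as text
List<String> values = response.asList(); // newline-separated entries
Document doc = response.asDocument();    // parses the body as JSON
```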
|
codereview_new_java_data_6571
|
public interface Builder extends CopyableBuilder<Ec2MetadataRetryPolicy.Builder,
}
- public static final class BuilderImpl implements Builder {
private Integer numRetries;
private BackoffStrategy backoffStrategy;
- public BuilderImpl() {
}
@Override
Hmm, why do we need it to be public?
public interface Builder extends CopyableBuilder<Ec2MetadataRetryPolicy.Builder,
}
+ private static final class BuilderImpl implements Builder {
private Integer numRetries;
private BackoffStrategy backoffStrategy;
+ private BuilderImpl() {
}
@Override
|
codereview_new_java_data_6572
|
public final class DefaultEc2MetadataClient extends BaseEc2MetadataClient implem
private DefaultEc2MetadataClient(Ec2MetadataBuilder builder) {
super(builder);
- // http client
Validate.isTrue(builder.httpClient == null || builder.httpClientBuilder == null,
"The httpClient and the httpClientBuilder can't both be configured.");
this.httpClient = Either
nit : do we need this comment?
public final class DefaultEc2MetadataClient extends BaseEc2MetadataClient implem
private DefaultEc2MetadataClient(Ec2MetadataBuilder builder) {
super(builder);
Validate.isTrue(builder.httpClient == null || builder.httpClientBuilder == null,
"The httpClient and the httpClientBuilder can't both be configured.");
this.httpClient = Either
|
codereview_new_java_data_6573
|
public interface Builder extends CopyableBuilder<Builder, S3Configuration> {
/**
* The supplier of profile file instances that should be consulted to determine the default value of
* {@link #useArnRegionEnabled(Boolean)} or {@link #multiRegionEnabled(Boolean)}.
- * This is not used, if those parameters are configured.
*
* <p>
* By default, the {@link ProfileFile#defaultProfileFile()} is used.
nit: "configured on the builder"?
public interface Builder extends CopyableBuilder<Builder, S3Configuration> {
/**
* The supplier of profile file instances that should be consulted to determine the default value of
* {@link #useArnRegionEnabled(Boolean)} or {@link #multiRegionEnabled(Boolean)}.
+ * This is not used if those parameters are configured on the builder.
*
* <p>
* By default, the {@link ProfileFile#defaultProfileFile()} is used.
|
codereview_new_java_data_6574
|
private void uploadOnce(List<Double> latencies) throws Exception {
transferManager.uploadDirectory(b -> b.bucket(bucket)
.s3Prefix(config.prefix())
.source(uploadPath));
- CompletedDirectoryUpload completedDirectoryUpload = upload.completionFuture().get(60, TimeUnit.MINUTES);
if (completedDirectoryUpload.failedTransfers().isEmpty()) {
long end = System.currentTimeMillis();
latencies.add((end - start) / 1000.0);
minor, can we make this configurable?
private void uploadOnce(List<Double> latencies) throws Exception {
transferManager.uploadDirectory(b -> b.bucket(bucket)
.s3Prefix(config.prefix())
.source(uploadPath));
+ CompletedDirectoryUpload completedDirectoryUpload = upload.completionFuture().get(timeout, TimeUnit.MINUTES);
if (completedDirectoryUpload.failedTransfers().isEmpty()) {
long end = System.currentTimeMillis();
latencies.add((end - start) / 1000.0);
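A minimal sketch of where the configurable wait could come from, assuming the benchmark config gains a nullable timeout setting; the accessor name and default below are illustrative:
```java
// Hedged sketch: fall back to the old 60-minute wait when no timeout
// is configured for the benchmark run.
private long resolveTimeoutMinutes(TransferManagerBenchmarkConfig config) {
    return config.timeout() != null ? config.timeout() : 60L;
}
```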
|
codereview_new_java_data_6575
|
public interface FileUpload extends ObjectTransfer {
* See {@link ResumableFileUpload} for supported formats.
*
* <p>
- * Currently, it's only supported if the underlying {@link S3AsyncClient} is CRT-based created via
- * {@link S3AsyncClient#crtBuilder()} or {@link S3AsyncClient#crtCreate()}.
* It will throw {@link UnsupportedOperationException} if the {@link S3TransferManager} is created
- * with a non CRT-based S3 client, i.e., created by {@link S3AsyncClient#builder()}.
*
* @return A {@link ResumableFileUpload} that can be used to resume the upload.
*/
...if the underlying {@link S3AsyncClient} is CRT-based created via {@link S3AsyncClient#crtBuilder()} or {@link S3AsyncClient#crtCreate()}
Right now it reads as if there may be more methods than these two that can create an CRT based client so you should only use those, but I am assuming that you have to use a CRT based client and that linking the methods is a helpful hint.
Maybe parentheses around methods? "...if the underlying {@link S3AsyncClient} is CRT-based (created via {@link S3AsyncClient#crtBuilder()} or {@link S3AsyncClient#crtCreate()})"? Or just use i.e. as below.
public interface FileUpload extends ObjectTransfer {
* See {@link ResumableFileUpload} for supported formats.
*
* <p>
+ * Currently, it's only supported if the underlying {@link S3AsyncClient} is CRT-based (created via
+ * {@link S3AsyncClient#crtBuilder()} or {@link S3AsyncClient#crtCreate()}).
* It will throw {@link UnsupportedOperationException} if the {@link S3TransferManager} is created
+ * with a non CRT-based S3 client (created via {@link S3AsyncClient#builder()}).
*
* @return A {@link ResumableFileUpload} that can be used to resume the upload.
*/
|
codereview_new_java_data_6576
|
public ResumableFileUpload pause() {
private ResumableFileUpload doPause() {
if (clientType != S3ClientType.CRT_BASED) {
- throw new UnsupportedOperationException("Pausing upload is not supported in non CRT-based S3 Client. Consider "
- + "passing a CRT-based S3 client to S3TransferManager instead: "
+ "S3AsyncClient.crtBuilder().build()");
}
Do we have to direct users to CRT or can we say
"Pausing upload is not supported in a non CRT-based S3 Client. For upload pause support, pass a CRT-based S3 client to S3TransferManager instead: [...]"
public ResumableFileUpload pause() {
private ResumableFileUpload doPause() {
if (clientType != S3ClientType.CRT_BASED) {
+ throw new UnsupportedOperationException("Pausing an upload is not supported in a non CRT-based S3 Client. For "
+ + "upload pause support, pass a CRT-based S3 client to S3TransferManage instead: "
+ "S3AsyncClient.crtBuilder().build()");
}
|
codereview_new_java_data_6577
|
public ResumableFileUpload pause() {
private ResumableFileUpload doPause() {
if (clientType != S3ClientType.CRT_BASED) {
throw new UnsupportedOperationException("Pausing an upload is not supported in a non CRT-based S3 Client. For "
- + "upload pause support, pass a CRT-based S3 client to S3TransferManage "
+ "instead: S3AsyncClient.crtBuilder().build();");
}
Nit: missing the `r` in TransferManager on line 65.
public ResumableFileUpload pause() {
private ResumableFileUpload doPause() {
if (clientType != S3ClientType.CRT_BASED) {
throw new UnsupportedOperationException("Pausing an upload is not supported in a non CRT-based S3 Client. For "
+ + "upload pause support, pass a CRT-based S3 client to S3TransferManager "
+ "instead: S3AsyncClient.crtBuilder().build();");
}
|
codereview_new_java_data_6578
|
private static String getMessageForTooManyAcquireOperationsError() {
}
public static String closedChannelMessage(Channel channel) {
- ChannelDiagnostics channelDiagnostics, parentChannelDiagnostics;
if (channel != null) {
channelDiagnostics = channel.attr(CHANNEL_DIAGNOSTICS).get();
parentChannelDiagnostics = channel.parent() != null ? channel.parent().attr(CHANNEL_DIAGNOSTICS).get() : null;
Looks like compilation failed.
```
ERROR] Failed to execute goal org.apache.maven.plugins:maven-compiler-plugin:3.8.1:compile (default-compile) on project netty-nio-client: Compilation failure: Compilation failure:
NettyUtils.java:[148,12] error: variable channelDiagnostics might not have been initialized
NettyUtils.java:[151,16] error: variable parentChannelDiagnostics might not have been initialized
```
private static String getMessageForTooManyAcquireOperationsError() {
}
public static String closedChannelMessage(Channel channel) {
+ ChannelDiagnostics channelDiagnostics = null;
+ ChannelDiagnostics parentChannelDiagnostics = null;
if (channel != null) {
channelDiagnostics = channel.attr(CHANNEL_DIAGNOSTICS).get();
parentChannelDiagnostics = channel.parent() != null ? channel.parent().attr(CHANNEL_DIAGNOSTICS).get() : null;
|
codereview_new_java_data_6579
|
import software.amazon.awssdk.annotations.SdkProtectedApi;
@SdkProtectedApi
public final class RequiredTrait implements Trait {
Could you add a small Javadoc comment to indicate this Trait purpose?
import software.amazon.awssdk.annotations.SdkProtectedApi;
+/**
+ * Trait that indicates a value must be provided for a member.
+ */
@SdkProtectedApi
public final class RequiredTrait implements Trait {
|
codereview_new_java_data_6580
|
public final class QueryParamMarshaller {
};
public static final JsonMarshaller<Void> NULL = (val, context, paramName, sdkField) -> {
- if (context.protocolHandler().isTraitValidationEnabled(RequiredTrait.class)
- && sdkField.containsTrait(RequiredTrait.class)) {
throw new IllegalArgumentException(String.format("Parameter '%s' must not be null", paramName));
}
};
Hmm, can we simplify this and only attach the RequiredTrait to protocols for which we have it enabled?
That way we don't need an explicit isTraitValidationEnabled check at runtime. If the RequiredTrait is there, we do it. Otherwise, we don't.
public final class QueryParamMarshaller {
};
public static final JsonMarshaller<Void> NULL = (val, context, paramName, sdkField) -> {
+ if (sdkField.containsTrait(RequiredTrait.class)) {
throw new IllegalArgumentException(String.format("Parameter '%s' must not be null", paramName));
}
};
|
codereview_new_java_data_6581
|
public boolean hasEvent() {
* @param <T> Type of metadata being requested.
* @return The value of the additional metadata being requested or null if it's not present.
*/
- public <T> T additionalMetadata(OperationMetadataAttribute<T> key) {
return additionalMetadata.get(key);
}
Unfortunately since this is a protected API, we shouldn't fix the spelling without deprecating the 'wrong' method name and creating a new one. We support people using newer core library versions with older client versions, and this could cause them to break by updating a core library.
We might be able to get away with fixing this when we do a minor version bump, but we'd probably want to lump it in with other changes to justify the annoyance to customers.
public boolean hasEvent() {
* @param <T> Type of metadata being requested.
* @return The value of the additional metadata being requested or null if it's not present.
*/
+ public <T> T addtionalMetadata(OperationMetadataAttribute<T> key) {
return additionalMetadata.get(key);
}
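A hedged sketch of the deprecate-and-delegate path described above, for whenever a minor version bump makes it worthwhile; the correctly spelled method is an assumption about a future change, not part of this diff:
```java
/**
 * @deprecated Use {@link #additionalMetadata(OperationMetadataAttribute)}.
 */
@Deprecated
public <T> T addtionalMetadata(OperationMetadataAttribute<T> key) {
    return additionalMetadata(key);
}

// Correctly spelled replacement; callers can migrate at their own pace.
public <T> T additionalMetadata(OperationMetadataAttribute<T> key) {
    return additionalMetadata.get(key);
}
```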
|
codereview_new_java_data_6582
|
public String toString() {
* Method to return the number of retries allowed.
* @return The number of retries allowed.
*/
- public int getNumRetries() {
return numRetries;
}
/**
* Method to return the BackoffStrategy used.
* @return The backoff Strategy used.
*/
- public BackoffStrategy getBackoffStrategy() {
return backoffStrategy;
}
We should use fluent getters, e.g. `numRetries()`.
public String toString() {
* Method to return the number of retries allowed.
* @return The number of retries allowed.
*/
+ public int numRetries() {
return numRetries;
}
/**
* Method to return the BackoffStrategy used.
* @return The backoff Strategy used.
*/
+ public BackoffStrategy backoffStrategy() {
return backoffStrategy;
}
|
codereview_new_java_data_6583
|
public final class ChannelAttributeKey {
"aws.http.nio.netty.async.channelDiagnostics");
/**
- * {@link AttributeKey} to keep track of whether we have received the {@link LastHttpContent}.
*/
- public static final AttributeKey<Boolean> LAST_HTTP_CONTENT_RECEIVED_KEY = NettyUtils.getOrCreateAttributeKey(
- "aws.http.nio.netty.async.lastHttpContentReceived");
/**
* {@link AttributeKey} to keep track of whether we should close the connection after this request
Should we rename it to `STREAMING_COMPLETE_KEY`? Alternatively, we can update the description of this field to make it clear that it indicates streaming has finished
public final class ChannelAttributeKey {
"aws.http.nio.netty.async.channelDiagnostics");
/**
+ * {@link AttributeKey} to keep track of whether streaming has completed; this is set to true when we receive the
+ * {@link LastHttpContent}.
*/
+ public static final AttributeKey<Boolean> STREAMING_COMPLETE_KEY = NettyUtils.getOrCreateAttributeKey(
+ "aws.http.nio.netty.async.streamingComplete");
/**
* {@link AttributeKey} to keep track of whether we should close the connection after this request
|
codereview_new_java_data_6584
|
public void notifyError(Exception exception) {
}
public void deliverData(ByteBuffer byteBuffer) {
- log.trace(() -> "received data of size: " + byteBuffer.remaining());
// If the subscription is cancelled, no op
if (isDone) {
return;
Will it be helpful to log this scenario when deliverData is called on a cancelled subscription?
public void notifyError(Exception exception) {
}
public void deliverData(ByteBuffer byteBuffer) {
+ log.trace(() -> "Received data: " + byteBuffer);
// If the subscription is cancelled, no op
if (isDone) {
return;
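A hedged sketch of the extra trace line the question above asks about, making the cancelled-subscription path visible instead of returning silently (wording is illustrative):
```java
// Hedged sketch: log the drop before the early return.
if (isDone) {
    log.trace(() -> "Subscription already cancelled, dropping "
                    + byteBuffer.remaining() + " bytes");
    return;
}
```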
|