| id | commit_message | diffs |
|---|---|---|
derby-DERBY-5488-ff025760
|
DERBY-5488: Make setObject( int, BigInteger ) rely on the existing setObject( int, BigDecimal ) logic.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1197172 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedPreparedStatement20.java",
"hunks": [
{
"added": [
"import java.math.BigInteger;"
],
"header": "@@ -32,6 +32,7 @@ import org.apache.derby.iapi.reference.SQLState;",
"removed": []
}
]
}
] |
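The commit above routes BigInteger parameters through the existing BigDecimal handling. A minimal sketch of that delegation with a hypothetical helper (the actual EmbedPreparedStatement20 change is not shown in the diff beyond the new import):

```java
import java.math.BigDecimal;
import java.math.BigInteger;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Types;

final class BigIntegerParamSketch {
    /**
     * Binds a BigInteger by converting it to a BigDecimal first, so the
     * existing setObject(int, BigDecimal) logic does the real work.
     */
    static void setBigInteger(PreparedStatement ps, int parameterIndex, BigInteger value)
            throws SQLException {
        if (value == null) {
            ps.setNull(parameterIndex, Types.DECIMAL);
        } else {
            // new BigDecimal(BigInteger) is exact; no precision is lost here.
            ps.setObject(parameterIndex, new BigDecimal(value));
        }
    }
}
```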
derby-DERBY-5489-bde7b3b8
|
DERBY-5489: getBinary() returns incorrect data after getObject() call on BLOB column
o Added missing check to getObject on LOB columns in the client driver.
o Added new checks to getBytes/getString for LOB columns in both drivers;
these getters stand out from the rest because invoking them multiple
times on a LOB column is allowed (an exception to the rule).
o Added two new tests which verify that invoking the mentioned getters
multiple times returns correct data, including when you first invoke
getBytes/getString and then invoke one of the other valid
getters for LOB columns. Some refactoring of LobRsGetterTest.
o Adjusted access pattern in UpdatableResultSetTest to comply with the rules.
At a later time it may be possible to lift this restriction on LOB columns.
Patch file: derby-5489-2b-fixes.diff
--This line, and those below, will be ignored--
M java/testing/org/apache/derbyTesting/functionTests/tests/jdbcapi/LobRsGetterTest.java
M java/testing/org/apache/derbyTesting/functionTests/tests/jdbcapi/_Suite.java
M java/testing/org/apache/derbyTesting/functionTests/tests/lang/UpdatableResultSetTest.java
M java/client/org/apache/derby/client/am/ResultSet.java
M java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1330681 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/ResultSet.java",
"hunks": [
{
"added": [
" int type = resultSetMetaData_.types_[column - 1];",
" if (type == Types.BLOB || type == Types.CLOB) {",
" checkLOBMultiCall(column);",
" // If the above didn't fail, this is the first getter",
" // invocation, or only getBytes and/or getString have been",
" // invoked previously. The special treatment of these getters",
" // is allowed for backwards compatibility.",
" }"
],
"header": "@@ -1050,11 +1050,18 @@ public abstract class ResultSet implements java.sql.ResultSet,",
"removed": [
""
]
},
{
"added": [
" int type = resultSetMetaData_.types_[column - 1];",
" if (type == Types.BLOB) {",
" checkLOBMultiCall(column);",
" // If the above didn't fail, this is the first getter",
" // invocation, or only getBytes has been invoked previously.",
" // The special treatment of this getter is allowed for",
" // backwards compatibility.",
" }"
],
"header": "@@ -1083,6 +1090,14 @@ public abstract class ResultSet implements java.sql.ResultSet,",
"removed": []
},
{
"added": [
" int type = resultSetMetaData_.types_[column - 1];",
" if (type == Types.BLOB || type == Types.CLOB) {",
" useStreamOrLOB(column);",
" }"
],
"header": "@@ -1354,6 +1369,10 @@ public abstract class ResultSet implements java.sql.ResultSet,",
"removed": []
},
{
"added": [
" checkLOBMultiCall(columnIndex);",
" columnUsedFlags_[columnIndex - 1] = true;",
" }",
"",
" /**",
" * Checks if a stream or a LOB object has already been created for the",
" * specified LOB column.",
" * <p>",
" * Accessing a LOB column more than once is not forbidden by the JDBC",
" * specification, but the Java API states that for maximum portability,",
" * result set columns within each row should be read in left-to-right order,",
" * and each column should be read only once. The restriction was implemented",
" * in Derby due to complexities with the positioning of store streams when",
" * the user was given multiple handles to the stream.",
" *",
" * @param columnIndex 1-based index of the LOB column",
" * @throws SqlException if the column has already been accessed",
" */",
" private void checkLOBMultiCall(int columnIndex)",
" throws SqlException {"
],
"header": "@@ -5481,6 +5500,26 @@ public abstract class ResultSet implements java.sql.ResultSet,",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java",
"hunks": [
{
"added": [
" int columnType = getColumnType(columnIndex);",
" if (columnType == Types.BLOB || columnType == Types.CLOB) {",
" checkLOBMultiCall(columnIndex);",
" // If the above didn't fail, this is the first getter invocation,",
" // or only getString and/or getBytes have been invoked previously.",
" // The special treatment of these getters is allowed for",
" // backwards compatibility.",
" }"
],
"header": "@@ -676,7 +676,14 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": [
""
]
},
{
"added": [
" if (maxFieldSize > 0 && isMaxFieldSizeType(columnType))"
],
"header": "@@ -687,7 +694,7 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": [
" if (maxFieldSize > 0 && isMaxFieldSizeType(getColumnType(columnIndex)))"
]
},
{
"added": [
" int columnType = getColumnType(columnIndex);",
" if (columnType == Types.BLOB) {",
" checkLOBMultiCall(columnIndex);",
" // If the above didn't fail, this is the first getter invocation,",
" // or only getBytes has been invoked previously. The special",
" // treatment of this getter is allowed for backwards compatibility.",
" }"
],
"header": "@@ -875,6 +882,13 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": []
},
{
"added": [
" if (maxFieldSize > 0 && isMaxFieldSizeType(columnType))"
],
"header": "@@ -885,7 +899,7 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": [
" if (maxFieldSize > 0 && isMaxFieldSizeType(getColumnType(columnIndex)))"
]
}
]
}
] |
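The diffs above add a per-column "already accessed" guard for LOB columns (checkLOBMultiCall, useStreamOrLOB, columnUsedFlags_). A standalone sketch of that bookkeeping with a hypothetical class name; the real drivers raise SqlException/StandardException with proper SQL states instead:

```java
import java.sql.SQLException;
import java.util.Arrays;

/** Tracks which LOB columns of the current row have already handed out a stream or LOB. */
final class LobAccessTracker {
    private final boolean[] columnUsed;

    LobAccessTracker(int columnCount) {
        this.columnUsed = new boolean[columnCount];
    }

    /** Call when moving to a new row: every column may be read again. */
    void reset() {
        Arrays.fill(columnUsed, false);
    }

    /** Fails if a stream or LOB object was already created for this column. */
    void checkLOBMultiCall(int columnIndex) throws SQLException {
        if (columnUsed[columnIndex - 1]) {
            throw new SQLException("LOB column " + columnIndex + " already accessed in this row");
        }
    }

    /** Marks the column as consumed; getBytes/getString only perform the check, not this. */
    void useStreamOrLOB(int columnIndex) throws SQLException {
        checkLOBMultiCall(columnIndex);
        columnUsed[columnIndex - 1] = true;
    }
}
```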
derby-DERBY-5489-f6c58c8c
|
DERBY-5489: getBinary() returns incorrect data after getObject() call on BLOB column
Added a test for the expected behavior of valid getters invoked on LOB columns.
A restriction has been put in place to allow for only one getter invocation on
a given field, with the possible exception of getBytes and getString (iff
invoked as the first getter on the field).
Patch file: derby-5489-1b-test.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1329186 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5490-514ee1dc
|
DERBY-5490: Fix the spawning of network servers on the OpenJDK 7 preview on Mac OS X.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1197563 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5492-e8afaebf
|
DERBY-5492 Restrictive file permissions: permissions removed also for owner on NTFS if Acl does not contain explicit entry for owner
Patch derby-5492-2, which solves this issue plus makes one other
adjustment; see item two below.
- Construct a new AclEntry for the owner with all rights, and remove the
existing ones (NTFS). This should handle the error seen in Oracle's
regressions.
- For Solaris/ZFS and similar file systems which support both Posix
file attributes view and ACLs, don't touch the ACLs but stick to the
Posix flags.
For the latter my rationale is as follows: Principle of least
surprise: most users never touch the ACLs but use the more familiar
Posix file masks. It turned out the existing Derby implementation,
although protecting the file adequately, showed a "+" in the ls(1)
listing indicating that the settings could not be directly mapped
onto the Posix model. The reason was that we removed more permissions
than the plain read, write, and execute. Since ZFS internally builds
on ACLs, the ls(1) listing would show that Derby had been tinkering
with the non-mappable ACL permissions. I think it is better to stick
to the Posix permissions by default. If people are using ACL
functionality they are likely more concerned than average with
security, and can run with default file permissions and take full
responsibility for the permissions of the files created.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1199673 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/io/FileUtil.java",
"hunks": [
{
"added": [
"import java.util.Arrays;",
"import java.util.HashSet;",
"import java.util.Set;"
],
"header": "@@ -38,8 +38,10 @@ import org.apache.derby.io.StorageFile;",
"removed": [
"import java.util.Iterator;"
]
},
{
"added": [
" private static Class aclEntryPermissionClz;"
],
"header": "@@ -612,6 +614,7 @@ nextFile:\tfor (int i = 0; i < list.length; i++) {",
"removed": []
},
{
"added": [
" private static Method values;",
" private static Method setPermissions;",
" "
],
"header": "@@ -626,7 +629,9 @@ nextFile:\tfor (int i = 0; i < list.length; i++) {",
"removed": [
""
]
},
{
"added": [
" if (JVMInfo.JDK_ID >= JVMInfo.J2SE_17 &&"
],
"header": "@@ -660,7 +665,7 @@ nextFile:\tfor (int i = 0; i < list.length; i++) {",
"removed": [
" if (JVMInfo.JDK_ID >= JVMInfo.J2SE_17 && "
]
},
{
"added": [
" aclEntryPermissionClz = Class.forName(",
" \"java.nio.file.attribute.AclEntryPermission\");"
],
"header": "@@ -717,6 +722,8 @@ nextFile:\tfor (int i = 0; i < list.length; i++) {",
"removed": []
},
{
"added": [
" values = aclEntryPermissionClz.",
" getMethod(\"values\", (Class[]) null);",
" setPermissions = aclEntryBuilderClz.",
" getMethod(\"setPermissions\", new Class[] { Set.class });"
],
"header": "@@ -748,6 +755,10 @@ nextFile:\tfor (int i = 0; i < list.length; i++) {",
"removed": []
}
]
}
] |
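The FileUtil hunks above do the NIO ACL work through reflection because the class still has to compile on older classpaths. On a Java 7+ classpath, the same "owner gets everything, Posix view preferred where available" idea can be written directly; a sketch under that assumption (not the actual FileUtil code):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.*;
import java.util.Collections;
import java.util.EnumSet;

final class RestrictPermissionsSketch {
    static void limitAccessToOwner(Path file) throws IOException {
        PosixFileAttributeView posix =
                Files.getFileAttributeView(file, PosixFileAttributeView.class);
        if (posix != null) {
            // Posix-capable file system (including ZFS): stick to the plain
            // owner rwx bits and leave any underlying ACLs alone.
            posix.setPermissions(PosixFilePermissions.fromString("rw-------"));
            return;
        }
        AclFileAttributeView acl =
                Files.getFileAttributeView(file, AclFileAttributeView.class);
        if (acl != null) {
            // NTFS: build one explicit ALLOW entry granting the owner all
            // permissions and replace the existing entries with it.
            UserPrincipal owner = Files.getOwner(file);
            AclEntry ownerEntry = AclEntry.newBuilder()
                    .setType(AclEntryType.ALLOW)
                    .setPrincipal(owner)
                    .setPermissions(EnumSet.allOf(AclEntryPermission.class))
                    .build();
            acl.setAcl(Collections.singletonList(ownerEntry));
        }
    }
}
```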
derby-DERBY-5493-5705f5bf
|
DERBY-5494
Prior to this fix the nested user update transaction used by the sequence updater
was doing a "lazy" commit, where the log record for the commit was written
to the stream but not forced to disk. It would get forced to disk by any
subsequent user transaction commit.
Changed the system to default to doing a real commit for all nested user update
transactions; those that don't need that behavior should use commitNoSync().
Changed identity columns to use commitNoSync() to keep the same performance
for those operations.
Includes the test contributed by rickh as part of the DERBY-5493 change.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1327218 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/InsertResultSet.java",
"hunks": [
{
"added": [],
"header": "@@ -786,7 +786,6 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet",
"removed": [
""
]
},
{
"added": [],
"header": "@@ -797,7 +796,6 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet",
"removed": [
""
]
},
{
"added": [],
"header": "@@ -815,7 +813,6 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet",
"removed": [
""
]
}
]
}
] |
derby-DERBY-5493-810f2b96
|
DERBY-5493: Increase the size of preallocated sequence ranges from 20 to 100 in order to boost concurrency.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1327682 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5493-8e352d60
|
DERBY-5493: Fix correctness problem with sequences by introducing syscs_peek_at_sequence and simplifying the SequenceUpdater code.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1327471 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java",
"hunks": [
{
"added": [
" private HashMap sequenceIDs;",
" "
],
"header": "@@ -418,7 +418,8 @@ public final class\tDataDictionaryImpl",
"removed": [
""
]
},
{
"added": [
"\t\t\t\t\t\t\t\t\t\t\t\t\"SYSCS_PEEK_AT_SEQUENCE\","
],
"header": "@@ -472,6 +473,7 @@ public final class\tDataDictionaryImpl",
"removed": []
},
{
"added": [
" sequenceIDs = new HashMap();",
""
],
"header": "@@ -700,6 +702,8 @@ public final class\tDataDictionaryImpl",
"removed": []
},
{
"added": [
" public Long peekAtSequence( String schemaName, String sequenceName )",
" throws StandardException",
" {",
" String uuid = getSequenceID( schemaName, sequenceName );",
"",
" if ( uuid == null )",
" {",
" throw StandardException.newException(SQLState.LANG_OBJECT_NOT_FOUND_DURING_EXECUTION, \"SEQUENCE\",",
" ( schemaName + \".\" + sequenceName) );",
" }",
" ",
" return ((SequenceUpdater) sequenceGeneratorCache.find( uuid )).peekAtCurrentValue();",
" }",
" "
],
"header": "@@ -10479,6 +10483,20 @@ public final class\tDataDictionaryImpl",
"removed": []
},
{
"added": [
" ",
" // BIGINT",
" // SYSCS_UTIL.SYSCS_PEEK_AT_SEQUENCE( VARCHAR(128), VARCHAR(128) )",
"",
" {",
" // procedure argument names",
" String[] arg_names = { \"schemaName\", \"sequenceName\" };",
"",
" // procedure argument types",
" TypeDescriptor[] arg_types =",
" {",
" CATALOG_TYPE_SYSTEM_IDENTIFIER,",
" CATALOG_TYPE_SYSTEM_IDENTIFIER",
" };",
"",
" createSystemProcedureOrFunction(",
" \"SYSCS_PEEK_AT_SEQUENCE\",",
" sysUtilUUID,",
" arg_names,",
" arg_types,",
"\t\t\t\t0,",
"\t\t\t\t0,",
" RoutineAliasInfo.READS_SQL_DATA,",
" false,",
" DataTypeDescriptor.getCatalogType( Types.BIGINT ),",
" newlyCreatedRoutines,",
" tc);",
" }"
],
"header": "@@ -13243,6 +13261,34 @@ public final class\tDataDictionaryImpl",
"removed": []
},
{
"added": [
"",
" dropSequenceID( descriptor );"
],
"header": "@@ -13992,6 +14038,8 @@ public final class\tDataDictionaryImpl",
"removed": []
},
{
"added": [
" SequenceDescriptor sequenceDescriptor = (SequenceDescriptor)"
],
"header": "@@ -14008,7 +14056,7 @@ public final class\tDataDictionaryImpl",
"removed": [
" return (SequenceDescriptor)"
]
},
{
"added": [
"",
" putSequenceID( sequenceDescriptor );",
" ",
" return sequenceDescriptor;"
],
"header": "@@ -14017,6 +14065,10 @@ public final class\tDataDictionaryImpl",
"removed": []
},
{
"added": [
" SequenceDescriptor sequenceDescriptor = (SequenceDescriptor)"
],
"header": "@@ -14044,7 +14096,7 @@ public final class\tDataDictionaryImpl",
"removed": [
" return (SequenceDescriptor)"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/catalog/SequenceUpdater.java",
"hunks": [
{
"added": [
" * <li>When updating the on-disk value, we use a subtransaction of the user's",
" * execution transaction. If the",
" * special transaction cannot do its work immediately, without waiting for a lock, then",
" * a TOO MUCH CONTENTION error is raised. It is believed that this can only happen",
" * if someone holds locks on SYSSEQUENCES, either via sequence DDL or a scan",
" * of the catalog. The TOO MUCH CONTENTION error tells",
" * the user to not scan SYSSEQUENCES directly, but to instead use the",
" * SYSCS_UTIL.SYSCS_PEEK_AT_SEQUENCE() if the user needs the current value of the",
" * sequence generator.</li>"
],
"header": "@@ -61,9 +61,15 @@ import org.apache.derby.iapi.types.RowLocation;",
"removed": [
" * <li>When updating the on-disk value, we first try to do the writing in",
" * a nested subtransaction. This is so that we can immediately release the write-lock afterwards.",
" * If that fails, we then try to do the writing in the user's execution transaction.</li>"
]
},
{
"added": [
" * <li>Otherwise, we must allocate a new range by updating the catalog row. We should not",
" * be in contention with another connection because the update method is synchronized.</li>"
],
"header": "@@ -76,34 +82,8 @@ import org.apache.derby.iapi.types.RowLocation;",
"removed": [
" * <li>Otherwise, we must allocate a new range by updating the catalog row. At this",
" * point we may find ourselves racing another session, which also needs the next number",
" * in the sequence.</li>",
" * <li>When we try to update the catalog row, we check to see whether the current value",
" * there is what we expect it to be. If it is, then all is well: we update the catalog row",
" * then return to the first step to try to get the next number from the new cache of",
" * pre-allocated numbers.</li>",
" * <li>If, however, the value in the catalog row is not what we expect, then another",
" * session has won the race to update the catalog. We accept this fact gracefully and",
" * do not touch the catalog. Instead, we return to the first step and try to get the",
" * next number from the new cache of numbers which the other session has just",
" * pre-allocated.</li>",
" * <li>We only allow ourselves to retry this loop a small number of times. If we still",
" * can't get the next number in the sequence, we raise an exception complaining that",
" * there is too much contention on the generator.</li>",
" * </ul>",
" *",
" * <p>",
" * If applications start seeing exceptions complaining that there is too much contention",
" * on a sequence generator, then we should improve this algorithm. Here are some options",
" * based on the idea that contention should go down if we increase the number of",
" * pre-allocated numbers:",
" * </p>",
" *",
" * <ul>",
" * <li>We can let the user change the size of the pre-allocated range.</li>",
" * <li>Derby can increase the size of the pre-allocated range when Derby detects",
" * too much contention.</li>"
]
},
{
"added": [],
"header": "@@ -130,9 +110,6 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" // This is the lock timeout in milliseconds; a negative number means no timeout",
" private long _lockTimeoutInMillis;",
""
]
},
{
"added": [],
"header": "@@ -142,7 +119,6 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" _lockTimeoutInMillis = getLockTimeout();"
]
},
{
"added": [
" * Update the sequence value on disk. This method does its work in a subtransaction of",
" * the user's execution transaction."
],
"header": "@@ -170,9 +146,8 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" * Update the sequence value on disk. This method is first called with a read/write subtransaction",
" * of the session's execution transaction. If work can't be done there immediately, this method",
" * is called with the session's execution transaction."
]
},
{
"added": [
" * Only one thread at a time is allowed through here. We do not want a race between the",
" * two calls to the sequence generator: getCurrentValueAndAdvance() and allocateNewRange().",
" public synchronized void getCurrentValueAndAdvance",
" // We may have to try to get a value from the Sequence Generator twice.",
" // The first attempt may fail because we need to pre-allocate a new chunk",
" // of values.",
" for ( int i = 0; i < 2; i++ )",
" //",
" // We try to get a sequence number. The SequenceGenerator method is synchronized",
" // so only one writer should be in there at a time. Lock contention is possible if",
" // someone has selected from SYSSEQUENCES contrary to our advice. In that case,",
" // we raise a TOO MUCH CONTENTION exception.",
" //"
],
"header": "@@ -325,24 +300,28 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" * Only one thread at a time is allowed through here. That synchronization is performed by",
" * the sequence generator itself.",
" public void getCurrentValueAndAdvance",
" Long startTime = null;",
"",
" // We try to get a sequence number. We try until we've exceeded the lock timeout",
" // in case we find ourselves in a race with another session which is draining numbers from",
" // the same sequence generator.",
" while ( true )"
]
},
{
"added": [
" ",
" }",
" //",
" // If we get here, then we failed to allocate a new sequence number range.",
" //",
" throw tooMuchContentionException();"
],
"header": "@@ -368,38 +347,16 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" ",
" //",
" // If we get here, then we failed to get a sequence number. Along the way,",
" // we or another session may have allocated more sequence numbers on disk. We go back",
" // in to try to grab one of those numbers.",
" //",
" if ( startTime == null )",
" {",
" // get the system time only if we have to",
" startTime = new Long( System.currentTimeMillis() );",
" continue;",
" }",
" ",
" if (",
" (_lockTimeoutInMillis >= 0L) &&",
" ( (System.currentTimeMillis() - startTime.longValue()) > _lockTimeoutInMillis )",
" )",
" {",
" //",
" // If we get here, then we exhausted our retry attempts. This might be a sign",
" // that we need to increase the number of sequence numbers which we",
" // allocate. There's an opportunity for Derby to tune itself here.",
" //",
" throw tooMuchContentionException();",
" }",
" ",
" } // end of retry loop",
""
]
},
{
"added": [
" public Long peekAtCurrentValue() throws StandardException"
],
"header": "@@ -408,7 +365,7 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" private Long peekAtCurrentValue() throws StandardException"
]
},
{
"added": [
" * Update the value on disk. Does its work in a subtransaction of the user's",
" * execution transaction. If that fails, raises a TOO MUCH CONTENTION exception.",
" public synchronized boolean updateCurrentValueOnDisk( Long oldValue, Long newValue ) throws StandardException"
],
"header": "@@ -421,15 +378,14 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" * Update the value on disk. First tries to update the value in a",
" * subtransaction. If that fails, falls back on the execution transaction.",
" * This is a callback method invoked by the sequence generator.",
" public boolean updateCurrentValueOnDisk( Long oldValue, Long newValue ) throws StandardException"
]
},
{
"added": [
" TransactionController nestedTransaction = executionTransaction.startNestedUserTransaction( false );",
" try",
" {"
],
"header": "@@ -458,17 +414,12 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" TransactionController nestedTransaction = null;",
" try {",
" nestedTransaction = executionTransaction.startNestedUserTransaction( false );",
" } catch (StandardException se) {}",
" ",
" // First try to do the work in the nested transaction. Fail if we can't",
" // get a lock immediately.",
" try {"
]
},
{
"added": [
" // We might be self-deadlocking if the user has selected from SYSSEQUENCES",
" // contrary to our advice.",
"",
" throw tooMuchContentionException();"
],
"header": "@@ -487,9 +438,10 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" // Fall back on the execution transaction",
" ",
" return updateCurrentValueOnDisk( executionTransaction, oldValue, newValue, true );"
]
}
]
}
] |
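The DataDictionaryImpl hunks above register SYSCS_UTIL.SYSCS_PEEK_AT_SEQUENCE(schemaName, sequenceName) as a BIGINT-returning routine so applications can read a sequence's current value without scanning SYSSEQUENCES (which risks the TOO MUCH CONTENTION error). A sketch of how a client might call it over JDBC; the VALUES-based invocation is an assumption about how the function is surfaced:

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

final class PeekAtSequenceSketch {
    /** Returns the sequence's current value, or null if no value is currently available. */
    static Long peekAtSequence(Connection conn, String schemaName, String sequenceName)
            throws SQLException {
        // Read the value through the system function instead of selecting
        // from SYSSEQUENCES, which would take locks on the catalog row.
        try (PreparedStatement ps = conn.prepareStatement(
                "VALUES SYSCS_UTIL.SYSCS_PEEK_AT_SEQUENCE(?, ?)")) {
            ps.setString(1, schemaName);
            ps.setString(2, sequenceName);
            try (ResultSet rs = ps.executeQuery()) {
                rs.next();
                long value = rs.getLong(1);
                return rs.wasNull() ? null : value;
            }
        }
    }
}
```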
derby-DERBY-5494-5705f5bf
|
DERBY-5494
Prior to this fix the nested user update transaction used by the sequence updater
was doing a "lazy" commit, where the log record for the commit was written
to the stream but not forced to disk. It would get forced to disk by any
subsequent user transaction commit.
Changed the system to default to doing a real commit for all nested user update
transactions; those that don't need that behavior should use commitNoSync().
Changed identity columns to use commitNoSync() to keep the same performance
for those operations.
Includes the test contributed by rickh as part of the DERBY-5493 change.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1327218 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/InsertResultSet.java",
"hunks": [
{
"added": [],
"header": "@@ -786,7 +786,6 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet",
"removed": [
""
]
},
{
"added": [],
"header": "@@ -797,7 +796,6 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet",
"removed": [
""
]
},
{
"added": [],
"header": "@@ -815,7 +813,6 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet",
"removed": [
""
]
}
]
}
] |
derby-DERBY-5494-ceaf7dfd
|
DERBY-5494 Same value returned by successive calls to a sequence generator flanking a disorderly shutdown.
DERBY-5780 identity column performance has degraded
The previous patch for DERBY-5494 had the unintended effect of forcing a
synchronous write for all nested user transactions at abort time. This
in turn caused identity column inserts to incur one synchronous write per
insert, because the nested user transaction is destroyed (with an abort)
for each insert.
To solve this, interfaces were changed so that calling code can set the
default commit sync behavior for the transaction rather than counting
on the "type" of transaction. Nested user transactions used for identity
columns have the default set to not sync, and the rest of the nested user
transactions default to syncing. Behavior of other types of transactions
should not be affected. User transactions still sync by default, and internal
transactions and nested top transactions still default to not sync.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1344065 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/store/raw/RawStoreFactory.java",
"hunks": [
{
"added": [
" @param flush_log_on_xact_end By default should the transaction ",
" commit and abort be synced to the log. Normal usage should pick true, ",
" unless there is specific performance need and usage works correctly if ",
" a commit can be lost on system crash."
],
"header": "@@ -723,6 +723,10 @@ public interface RawStoreFactory extends Corruptable {",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/store/raw/xact/TransactionFactory.java",
"hunks": [
{
"added": [
" @param contextMgr is the context manager to use. It must",
" be the current context manager.",
" @param transName is the transaction name. It will be ",
" displayed in the transactiontable VTI.",
" @param flush_log_on_xact_end By default should the transaction commit",
" and abort be synced to the log. Normal",
" usage should pick true, unless there",
" is specific performance need and usage",
" works correctly if a commit can be ",
" lost on system crash."
],
"header": "@@ -117,10 +117,16 @@ public interface TransactionFactory extends Corruptable {",
"removed": [
" @param contextMgr is the context manager to use. It must be ",
" the current context manager.",
" @param transName is the transaction name. It will be ",
" displayed in the transactiontable VTI."
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/catalog/SequenceUpdater.java",
"hunks": [
{
"added": [
" TransactionController subTransaction = ",
" executionTC.startNestedUserTransaction( true, true );",
""
],
"header": "@@ -272,7 +272,9 @@ public abstract class SequenceUpdater implements Cacheable",
"removed": [
" TransactionController subTransaction = executionTC.startNestedUserTransaction( true );"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/access/RAMTransaction.java",
"hunks": [
{
"added": [
" *",
" * @param readOnly Is transaction readonly? Only 1 non-read",
" * only nested transaction is allowed per ",
" * transaction.",
" *",
" * @param flush_log_on_xact_end By default should the transaction commit",
" * and abort be synced to the log. Normal",
" * usage should pick true, unless there is",
" * specific performance need and usage ",
" * works correctly if a commit can be lost",
" * on system crash.",
" public TransactionController startNestedUserTransaction(",
" boolean readOnly,",
" boolean flush_log_on_xact_end)"
],
"header": "@@ -2301,12 +2301,25 @@ public class RAMTransaction",
"removed": [
" public TransactionController startNestedUserTransaction(boolean readOnly)"
]
},
{
"added": [
" getLockSpace(), ",
" cm,",
" cm, ",
" AccessFactoryGlobals.NESTED_UPDATE_USER_TRANS,",
" flush_log_on_xact_end));"
],
"header": "@@ -2327,10 +2340,13 @@ public class RAMTransaction",
"removed": [
" getLockSpace(), cm,",
" cm, AccessFactoryGlobals.NESTED_UPDATE_USER_TRANS));"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/xact/Xact.java",
"hunks": [
{
"added": [
" // Whether or not to flush log on commit or abort. ",
" // Current usage:",
" // User transactions default to flush. Internal and nested top",
" // transactions default to not flush. ",
" //",
" // Nested user update transactions are configured when they are created, ",
" // and most default to flush. Nested user update transaction used for",
" // identity column maintenance defaults to not flush to maintain ",
" // backward performance compatibility with previous releases.",
" //",
" // In all cases log will not be flushsed by Xact.prepareCommit()",
" // if commitNoSync() has been called rather than commit.",
" private boolean flush_log_on_xact_end;",
""
],
"header": "@@ -243,6 +243,20 @@ public class Xact extends RawTransaction implements Limit, LockOwner {",
"removed": []
},
{
"added": [
" CompatibilitySpace compatibilitySpace,",
" boolean flush_log_on_xact_end)",
"\t\tthis.xactFactory = xactFactory;",
"\t\tthis.logFactory = logFactory;",
"\t\tthis.dataFactory = dataFactory;",
"\t\tthis.dataValueFactory = dataValueFactory;",
"\t\tthis.readOnly = readOnly;",
"\t\tthis.flush_log_on_xact_end = flush_log_on_xact_end;"
],
"header": "@@ -264,16 +278,18 @@ public class Xact extends RawTransaction implements Limit, LockOwner {",
"removed": [
" CompatibilitySpace compatibilitySpace)",
"\t\tthis.xactFactory = xactFactory;",
"\t\tthis.logFactory = logFactory;",
"\t\tthis.dataFactory = dataFactory;",
"\t\tthis.dataValueFactory = dataValueFactory;",
"\t\tthis.readOnly = readOnly;"
]
},
{
"added": [],
"header": "@@ -298,11 +314,6 @@ public class Xact extends RawTransaction implements Limit, LockOwner {",
"removed": [
"",
" /*",
" System.out.println(\"Xact.constructor: readonly = \" + this.readOnly +",
" \";this = \" + this);",
" */"
]
},
{
"added": [
"\t\t\tif (seenUpdates) ",
" {"
],
"header": "@@ -773,8 +784,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {",
"removed": [
"\t\t\tif (seenUpdates) {",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/xact/XactFactory.java",
"hunks": [
{
"added": [
" RawStoreFactory rsf, ",
" ContextManager cm,",
" boolean readOnly,",
" CompatibilitySpace compatibilitySpace,",
" String xact_context_id,",
" String transName,",
" boolean excludeMe,",
" boolean flush_log_on_xact_end)"
],
"header": "@@ -312,13 +312,14 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup",
"removed": [
" RawStoreFactory rsf,",
" ContextManager cm,",
" boolean readOnly,",
" CompatibilitySpace compatibilitySpace,",
" String xact_context_id,",
" String transName,",
" boolean excludeMe)"
]
},
{
"added": [
" readOnly, compatibilitySpace, flush_log_on_xact_end);"
],
"header": "@@ -335,7 +336,7 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup",
"removed": [
" readOnly, compatibilitySpace);"
]
},
{
"added": [
" return(",
" startCommonTransaction(",
" rsf, ",
" cm, ",
" false, // user xact always read/write ",
" null, ",
" USER_CONTEXT_ID, ",
" transName, ",
" true, // user xact always excluded during quiesce",
" true)); // user xact default flush on xact end"
],
"header": "@@ -351,8 +352,16 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup",
"removed": [
" return(startCommonTransaction(",
" rsf, cm, false, null, USER_CONTEXT_ID, transName, true));"
]
},
{
"added": [
" return(",
" startCommonTransaction(",
" rsf, ",
" cm, ",
" true, ",
" compatibilitySpace, ",
" NESTED_READONLY_USER_CONTEXT_ID, ",
" transName, ",
" false,",
" true)); // user readonly xact default flush on xact",
" // end, should never have anything to flush.",
" String transName,",
" boolean flush_log_on_xact_end)",
" return(",
" startCommonTransaction(",
" rsf, ",
" cm, ",
" false, ",
" null, ",
" NESTED_UPDATE_USER_CONTEXT_ID, ",
" transName, ",
" true,",
" flush_log_on_xact_end)); // allow caller to choose default ",
" // log log flushing on commit/abort",
" // for internal operations used ",
" // nested user update transaction."
],
"header": "@@ -362,20 +371,39 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup",
"removed": [
" return(startCommonTransaction(",
" rsf, cm, true, compatibilitySpace, ",
" NESTED_READONLY_USER_CONTEXT_ID, transName, false));",
" String transName)",
" return(startCommonTransaction(",
" rsf, cm, false, null, ",
" NESTED_UPDATE_USER_CONTEXT_ID, transName, true));"
]
},
{
"added": [
" rsf, ",
" cm, ",
" false, ",
" null, ",
" USER_CONTEXT_ID, ",
" AccessFactoryGlobals.USER_TRANS_NAME, ",
" true,",
" true); // user xact default flush on xact end"
],
"header": "@@ -395,8 +423,14 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup",
"removed": [
" rsf, cm, false, null, ",
" USER_CONTEXT_ID, AccessFactoryGlobals.USER_TRANS_NAME, true);"
]
},
{
"added": [
" this, logFactory, dataFactory, dataValueFactory, ",
" false, null, false);"
],
"header": "@@ -443,7 +477,8 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup",
"removed": [
" this, logFactory, dataFactory, dataValueFactory, false, null);"
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/unitTests/store/T_AccessFactory.java",
"hunks": [
{
"added": [
" TransactionController child_tc = ",
" tc.startNestedUserTransaction(true, true);"
],
"header": "@@ -2987,7 +2987,8 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" TransactionController child_tc = tc.startNestedUserTransaction(true);"
]
},
{
"added": [
" child_tc = tc.startNestedUserTransaction(true, true);",
" child_tc.startNestedUserTransaction(true, true);"
],
"header": "@@ -3033,11 +3034,11 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" child_tc = tc.startNestedUserTransaction(true);",
" child_tc.startNestedUserTransaction(true);"
]
},
{
"added": [
" child_tc = tc.startNestedUserTransaction(true, true);"
],
"header": "@@ -3074,7 +3075,7 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" child_tc = tc.startNestedUserTransaction(true);"
]
},
{
"added": [
" child_tc = tc.startNestedUserTransaction(true, true);"
],
"header": "@@ -3132,7 +3133,7 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" child_tc = tc.startNestedUserTransaction(true);"
]
},
{
"added": [
" child_tc = tc.startNestedUserTransaction(true, true);"
],
"header": "@@ -3183,7 +3184,7 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" child_tc = tc.startNestedUserTransaction(true);"
]
},
{
"added": [
" child_tc = tc.startNestedUserTransaction(false, true);"
],
"header": "@@ -3213,7 +3214,7 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" child_tc = tc.startNestedUserTransaction(false);"
]
},
{
"added": [
" child_tc = tc.startNestedUserTransaction(false, true);"
],
"header": "@@ -3253,7 +3254,7 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" child_tc = tc.startNestedUserTransaction(false);"
]
}
]
}
] |
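The hunks above thread a flush_log_on_xact_end flag from startNestedUserTransaction down into Xact so each nested user transaction can choose at creation time whether its commit is synced to the log. A compact sketch of that pattern, with hypothetical names standing in for Derby's transaction classes:

```java
/** Sketch: let the creator of a transaction pick the commit durability default. */
final class NestedTransactionSketch {

    static final class Transaction {
        private final boolean flushLogOnXactEnd;

        Transaction(boolean flushLogOnXactEnd) {
            this.flushLogOnXactEnd = flushLogOnXactEnd;
        }

        void commit() {
            writeCommitLogRecord();
            if (flushLogOnXactEnd) {
                // Real commit: force the commit record to disk before returning.
                syncLogToDisk();
            }
            // Otherwise the record is flushed lazily by a later syncing commit.
        }

        void commitNoSync() {
            // Caller explicitly opts out of the sync regardless of the default.
            writeCommitLogRecord();
        }

        private void writeCommitLogRecord() { /* append commit record to the log stream */ }
        private void syncLogToDisk()        { /* force the log to stable storage */ }
    }

    /** Identity-column maintenance keeps the old, cheaper no-sync default. */
    static Transaction startForIdentityColumn() { return new Transaction(false); }

    /** Sequence updates and other nested user transactions default to a real, synced commit. */
    static Transaction startForSequenceUpdate() { return new Transaction(true); }
}
```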
derby-DERBY-5496-eddb67d3
|
DERBY-5496: Compile the JUnit infrastructure package against the small device classpath, rather than the jdk 1.4 classpath.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1199234 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/JDBC.java",
"hunks": [
{
"added": [
"import java.lang.reflect.Method;"
],
"header": "@@ -20,6 +20,7 @@",
"removed": []
},
{
"added": [
" String tmpstr = (String)o;",
" boolean b = true;",
" if (!(o instanceof String))",
" b = false;",
" if (!(tmpstr.startsWith(\"SQL\")))",
" b = false;",
" if (tmpstr.length() != 18)",
" b = false;",
" for (int i=3 ; i<18 ; i++)",
" if (Character.isDigit(tmpstr.charAt(i)))",
" continue;",
" else",
" b = false;",
" break;",
" return b;"
],
"header": "@@ -44,34 +45,25 @@ public class JDBC {",
"removed": [
" // unless JSR169, use String.matches...",
" if (JDBC.vmSupportsJDBC3()) ",
" return o instanceof String &&",
" ((String) o).matches(\"SQL[0-9]{15}\");",
" }",
" else",
" {",
" String tmpstr = (String)o;",
" boolean b = true;",
" if (!(o instanceof String))",
" b = false;",
" if (!(tmpstr.startsWith(\"SQL\")))",
" b = false;",
" if (tmpstr.length() != 18)",
" b = false;",
" for (int i=3 ; i<18 ; i++)",
" if (Character.isDigit(tmpstr.charAt(i)))",
" continue;",
" else",
" {",
" b = false;",
" break;",
" }",
" return b;"
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/junit/RuntimeStatisticsParser.java",
"hunks": [
{
"added": [
" ",
" return Utilities.split(positionLines, '\\n');"
],
"header": "@@ -417,17 +417,8 @@ public class RuntimeStatisticsParser {",
"removed": [
" if (JDBC.vmSupportsJSR169())",
" {",
" // do something else then split.",
" String [] startPositionLines = Utilities.split(positionLines, '\\n');",
" return startPositionLines;",
" }",
" else",
" {",
" String [] startPositionLines = positionLines.split(\"\\n\");",
" return startPositionLines;",
" }"
]
}
]
}
] |
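The JDBC.java hunk above replaces the regex-based check for system-generated names of the form "SQL" followed by 15 digits with a hand-rolled loop, because String#matches is not available on the small device (JSR 169) classpath. A sketch of the same check written with explicit braces (hypothetical class name; the real code lives in the test utility class):

```java
final class GeneratedNameCheckSketch {
    /** True if o is a String of the form "SQL" followed by exactly 15 digits. */
    static boolean looksLikeGeneratedName(Object o) {
        if (!(o instanceof String)) {
            return false;
        }
        String s = (String) o;
        if (!s.startsWith("SQL") || s.length() != 18) {
            return false;
        }
        for (int i = 3; i < 18; i++) {
            if (!Character.isDigit(s.charAt(i))) {
                return false;
            }
        }
        return true;
    }
}
```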
derby-DERBY-5498-abf8151c
|
DERBY-5498 ClosedByInterruptException in AuthenticationTest
Patch d5498b plugs a hole in the handling of NIO channel closures due
to interrupt in DirFile4#getExclusiveFileLock.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1200995 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/io/DirFile4.java",
"hunks": [
{
"added": [
"import java.nio.channels.AsynchronousCloseException;",
"import java.nio.channels.ClosedChannelException;",
"import org.apache.derby.iapi.util.InterruptStatus;"
],
"header": "@@ -31,12 +31,13 @@ import java.io.OutputStream;",
"removed": [
"import java.security.AccessControlException;",
"import org.apache.derby.shared.common.reference.SQLState;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/FileContainer.java",
"hunks": [
{
"added": [],
"header": "@@ -1511,8 +1511,6 @@ abstract class FileContainer",
"removed": [
" protected final static int INTERRUPT_RETRY_SLEEP = 500; // millis",
" protected final static int MAX_INTERRUPT_RETRIES = 120; // i.e. 60s"
]
},
{
"added": [
" int maxTries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -1587,7 +1585,7 @@ abstract class FileContainer",
"removed": [
" int maxTries = MAX_INTERRUPT_RETRIES;"
]
},
{
"added": [
" Thread.sleep(",
" InterruptStatus.INTERRUPT_RETRY_SLEEP);"
],
"header": "@@ -1674,7 +1672,8 @@ abstract class FileContainer",
"removed": [
" Thread.sleep(INTERRUPT_RETRY_SLEEP);"
]
},
{
"added": [
" int maxTries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -2069,7 +2068,7 @@ abstract class FileContainer",
"removed": [
" int maxTries = MAX_INTERRUPT_RETRIES;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer.java",
"hunks": [
{
"added": [
" int maxTries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -435,7 +435,7 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction",
"removed": [
" int maxTries = MAX_INTERRUPT_RETRIES; // ca 60s = (120 * 0.5s)"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer4.java",
"hunks": [
{
"added": [
" int retries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -324,7 +324,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" int retries = MAX_INTERRUPT_RETRIES;"
]
},
{
"added": [
" channelCleanupMonitor.wait(",
" InterruptStatus.INTERRUPT_RETRY_SLEEP);"
],
"header": "@@ -333,7 +333,8 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" channelCleanupMonitor.wait(INTERRUPT_RETRY_SLEEP);"
]
},
{
"added": [
" int retries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -346,7 +347,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" int retries = MAX_INTERRUPT_RETRIES;"
]
},
{
"added": [
" int retries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -498,7 +499,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" int retries = MAX_INTERRUPT_RETRIES;"
]
},
{
"added": [
" channelCleanupMonitor.wait(",
" InterruptStatus.INTERRUPT_RETRY_SLEEP);"
],
"header": "@@ -507,7 +508,8 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" channelCleanupMonitor.wait(INTERRUPT_RETRY_SLEEP);"
]
},
{
"added": [
" int retries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -519,7 +521,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" int retries = MAX_INTERRUPT_RETRIES;"
]
},
{
"added": [
" * InterruptStatus.MAX_INTERRUPT_RETRIES *",
" * InterruptStatus.INTERRUPT_RETRY_SLEEP}."
],
"header": "@@ -648,7 +650,8 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" * FileContainer#MAX_INTERRUPT_RETRIES * FileContainer#INTERRUPT_RETRY_SLEEP}."
]
},
{
"added": [
" if (timesWaited > InterruptStatus.MAX_INTERRUPT_RETRIES) {"
],
"header": "@@ -721,7 +724,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" if (timesWaited > MAX_INTERRUPT_RETRIES) {"
]
},
{
"added": [
" channelCleanupMonitor.wait(",
" InterruptStatus.INTERRUPT_RETRY_SLEEP);"
],
"header": "@@ -729,7 +732,8 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" channelCleanupMonitor.wait(INTERRUPT_RETRY_SLEEP);"
]
},
{
"added": [
" Thread.sleep(InterruptStatus.INTERRUPT_RETRY_SLEEP);"
],
"header": "@@ -763,7 +767,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" Thread.sleep(INTERRUPT_RETRY_SLEEP);"
]
},
{
"added": [
" int retries = InterruptStatus.MAX_INTERRUPT_RETRIES;"
],
"header": "@@ -849,7 +853,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" int retries = MAX_INTERRUPT_RETRIES;"
]
},
{
"added": [
" Thread.sleep(InterruptStatus.INTERRUPT_RETRY_SLEEP);"
],
"header": "@@ -870,7 +874,7 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" Thread.sleep(INTERRUPT_RETRY_SLEEP);"
]
}
]
}
] |
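The commit above centralizes the retry constants (500 ms sleep, 120 attempts) in InterruptStatus and hardens DirFile4#getExclusiveFileLock against NIO channels being closed by thread interrupts. A hedged sketch of the general retry shape such code uses; the names and the generic wrapper are hypothetical, and the action is assumed to reopen its channel on each attempt:

```java
import java.nio.channels.ClosedByInterruptException;

final class InterruptRetrySketch {
    static final int MAX_INTERRUPT_RETRIES = 120;       // i.e. 60s total
    static final int INTERRUPT_RETRY_SLEEP = 500;       // millis

    interface IoAction<T> { T run() throws Exception; }

    /** Re-runs an NIO action whose channel may be closed by a thread interrupt. */
    static <T> T runWithRetries(IoAction<T> action) throws Exception {
        boolean sawInterrupt = false;
        try {
            for (int attempt = 0; attempt < MAX_INTERRUPT_RETRIES; attempt++) {
                try {
                    return action.run();
                } catch (ClosedByInterruptException e) {
                    // The channel was killed by an interrupt; clear the flag so the
                    // sleep below is not itself interrupted, remember it, and retry.
                    sawInterrupt |= Thread.interrupted();
                    Thread.sleep(INTERRUPT_RETRY_SLEEP);
                }
            }
            throw new Exception("gave up after " + MAX_INTERRUPT_RETRIES + " retries");
        } finally {
            if (sawInterrupt) {
                // Restore the interrupt status for the caller once we are done.
                Thread.currentThread().interrupt();
            }
        }
    }
}
```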
derby-DERBY-550-7d6219fc
|
- DERBY-1535 Trial 2 for DERBY-550: improve use of the Engine from the NetworkServer and reduce memory usage. - Replaces the call to setBytes() with a call to setBinaryStream(), which was noted as room for improvement in DERBY-1559. - Patch by Tomohito Nakayama (tomonaka@basil.ocn.ne.jp)
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@465249 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
"\t\t\t\t\t\t\t",
" if ( paramBytes==null ) {",
"\t\t\t\t\t\t\t\tps.setBytes(i+1, ",
" null );",
" ",
"\t\t\t\t\t\t\t\tps.setBinaryStream(i+1, ",
" new ByteArrayInputStream(paramBytes),"
],
"header": "@@ -4547,10 +4547,14 @@ class DRDAConnThread extends Thread {",
"removed": [
"\t\t\t\t\t\t\tif (paramBytes==null || !useSetBinaryStream) {",
"\t\t\t\t\t\t\t\tps.setBytes(i+1, paramBytes);",
"\t\t\t\t\t\t\t\tps.setBinaryStream(i+1, new ByteArrayInputStream(paramBytes),"
]
}
]
}
] |
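The DRDAConnThread hunk above splits the null case (still setBytes(i+1, null)) from the non-null case, which now streams the bytes via setBinaryStream instead of materializing them with setBytes. A minimal sketch of that binding choice, assuming hypothetical variable names:

```java
import java.io.ByteArrayInputStream;
import java.sql.PreparedStatement;
import java.sql.SQLException;

final class BinaryParamBindingSketch {
    static void bindBinaryParam(PreparedStatement ps, int oneBasedIndex, byte[] paramBytes)
            throws SQLException {
        if (paramBytes == null) {
            // Nothing to stream: bind a plain SQL NULL.
            ps.setBytes(oneBasedIndex, null);
        } else {
            // Stream the value so the engine does not have to hold a second
            // copy of the byte array (the memory-use improvement from DERBY-1559).
            ps.setBinaryStream(oneBasedIndex,
                               new ByteArrayInputStream(paramBytes),
                               paramBytes.length);
        }
    }
}
```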
derby-DERBY-5504-19a48ec5
|
DERBY-5504: Prepare replication tests for spaces in path names
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1203605 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5504-bdd8f6d6
|
DERBY-5504: Use execJavaCmd() in SecureServerTest and Driver40UnbootedTest
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1203113 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5507-006b542c
|
DERBY-5507: Orderly shutdown fails if you are using BUILTIN authentication and turn on derby.database.propertiesOnly
Make sure passwords are mapped to a hashed token before they are stored
in the database, even if the password is already defined in a system
property.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1220685 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/property/PropertyValidation.java",
"hunks": [
{
"added": [
"\t\t\t\t\tif (mappedValue == null)",
" \t\t\t\t\t\tmappedValue = psc.map(key, value, d);",
""
],
"header": "@@ -61,6 +61,9 @@ public class PropertyValidation implements PropertyFactory",
"removed": []
}
]
}
] |
derby-DERBY-5509-3f9d8123
|
DERBY-5509: javadoc for NetServlet and NetworkServerControl is missing a few closing tags.
Adding the missing tags.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1204128 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/drda/NetServlet.java",
"hunks": [
{
"added": [
"\t<LI><PRE>portNumber</PRE> - Port number to use. The default is 1527.</LI>",
"\t\t\tinitialization if 'true'.</LI>",
"\t<LI><PRE>tracingDirectory</PRE> - Directory for trace files</LI>"
],
"header": "@@ -40,10 +40,10 @@ import org.apache.derby.iapi.reference.Property;",
"removed": [
"\t<LI><PRE>portNumber</PRE> - Port number to use. The default is 1527.",
"\t\t\tinitialization if 'true'.",
"\t<LI><PRE>tracingDirectory</PRE> - Directory for trace files"
]
}
]
},
{
"file": "java/drda/org/apache/derby/drda/NetworkServerControl.java",
"hunks": [
{
"added": [
"\t</LI>"
],
"header": "@@ -62,6 +62,7 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": []
},
{
"added": [
"\t</P>"
],
"header": "@@ -97,6 +98,7 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": []
},
{
"added": [
"\tindicates the ip address to which NetworkServerControl should connect. </LI>"
],
"header": "@@ -108,7 +110,7 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": [
"\tindicates the ip address to which NetworkServerControl should connect "
]
},
{
"added": [
"</UL>",
"</P>",
"<P><B>Examples.</B></P>",
"\t</P>",
"\t</P>"
],
"header": "@@ -139,18 +141,22 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": [
"<P><B>Examples.</B>"
]
},
{
"added": [
"\t</P>"
],
"header": "@@ -158,6 +164,7 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": []
},
{
"added": [
"\t</P>"
],
"header": "@@ -167,6 +174,7 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": []
},
{
"added": [
"\t * </P>",
"\t * </P>"
],
"header": "@@ -236,12 +244,14 @@ public class NetworkServerControl{",
"removed": []
},
{
"added": [
"\t * </P>"
],
"header": "@@ -270,6 +280,7 @@ public class NetworkServerControl{",
"removed": []
},
{
"added": [
"\t * </P>"
],
"header": "@@ -375,6 +386,7 @@ public class NetworkServerControl{",
"removed": []
}
]
}
] |
derby-DERBY-551-cec114ad
|
DERBY-551 (partial) Adds more comments to InternalTriggerExecutionContext.validateStatement.
Patch submitted by Deepa Remesh
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@421920 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/InternalTriggerExecutionContext.java",
"hunks": [
{
"added": [
"\t\t// DDL statements are not allowed in triggers. Parser does not allow ",
"\t\t// DDL statements in a trigger's action statement. This runtime check",
"\t\t// is needed only for DDL statements executed by procedures within a ",
"\t\t// trigger context. ",
"\t\t// No INSERT/UPDATE/DELETE for a before trigger. Parser does not allow ",
"\t\t// these DML statements in a trigger's action statement in a before ",
"\t\t// trigger. Currently, parser does not disallow creation of before ",
"\t\t// triggers calling procedures that modify SQL data. This runtime check",
"\t\t// is needed to not allow execution of these DML statements by procedures",
"\t\t// within a before trigger context. ",
"\t \telse if (triggerd.isBeforeTrigger() && "
],
"header": "@@ -327,15 +327,21 @@ public class InternalTriggerExecutionContext implements TriggerExecutionContext,",
"removed": [
"\t\t// DDL statements are not allowed in triggers",
"\t\t/*",
"\t\t** No INSERT/UPDATE/DELETE for a before trigger.",
"\t \t*/",
"\t\telse if (triggerd.isBeforeTrigger() && "
]
}
]
}
] |
derby-DERBY-5514-dcfc481b
|
DERBY-5514 SecureServerTest (and others) don't play nice with EMMA: AccessControlException
Patch derby-5514-2:
- fixes the use of String#contains,
- adds -Demma.verbosity.level=silent to RuntimeInfoTest,
- adds the convenience method runsWithEmma to BaseTestCase,
- adds fixes to NetworkServerTestSetup to
a) always use Emma verbosity silent when spawning a server in a separate VM, and also
b) refrain from starting the server with the security manager when running with Emma, since the default server policy doesn't contain permissions for Emma, and finally
- skips the assertion that the security manager is being used in
SecureServerTest (it is not, see the preceding item).
With patch d5514-emma-permissions-to-all, Emma now runs OK with "ant
emma-all" and suites.All.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1207471 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/NetworkServerTestSetup.java",
"hunks": [
{
"added": [
" al.add( \"-Demma.verbosity.level=silent\" );",
" // Loading from classes need to work-around the limitation of the",
" // default policy file doesn't work with classes. Similarly, if we are",
" // running with Emma we don't run with the security manager, as the",
" // default server policy doesn't contain needed permissions and,",
" // additionally, Emma sources do not use doPrivileged blocks anyway.",
" if (!TestConfiguration.loadingFromJars() || BaseTestCase.runsWithEmma())"
],
"header": "@@ -255,12 +255,16 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" // Loading from classes need to work-around the limitation",
" // of the default policy file doesn't work with classes.",
" if (!TestConfiguration.loadingFromJars())"
]
}
]
}
] |
derby-DERBY-5517-d109dede
|
DERBY-5517: testReplication_Local_3_p1_StateNegativeTests failed with connection refused
Make sure server processes in one test case have stopped before the next
test case is started.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1213251 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-552-5a159416
|
DERBY-552 Fix fetching resources from an installed jar when the database itself is a jar file.
The old code did not handle the case when the length of the resource (jar) entry was unknown.
New code reads the resource from the JarInputStream into a local byte array and returns a
stream based upon that to the application. This also provides better isolation between
the application and derby, denying user code the ability to read the entire jar or
to prevent correct shutdown of the database. Added utilities to AccessibleByteArrayOutputStream
to enable copying an InputStream into a local array.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@483653 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/io/AccessibleByteArrayOutputStream.java",
"hunks": [
{
"added": [
"import java.io.IOException;",
"import java.io.InputStream;"
],
"header": "@@ -22,6 +22,8 @@",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/reflect/JarLoader.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.io.AccessibleByteArrayOutputStream;"
],
"header": "@@ -36,6 +36,7 @@ import java.util.jar.JarEntry;",
"removed": []
},
{
"added": [
" We copy to the contents to a byte array and return a",
" stream around that to the caller. Though a copy is",
" involved it has the benefit of:",
" <UL>",
" <LI> Isolating the application from the JarInputStream, thus",
" denying any possibility of the application reading more of the",
" jar that it should be allowed to. E.g. the contents class files are not",
" exposed through getResource.",
" <LI> Avoids any possibility of the application holding onto",
" the open stream beyond shutdown of the database, thus leading",
" to leaked file descriptors or inability to remove the jar.",
" </UL>"
],
"header": "@@ -347,9 +348,18 @@ class JarLoader extends ClassLoader {",
"removed": [
"\t\tHere we need to get the size of the zip entry and",
"\t\tput a limiting stream around it. Otherwise the",
"\t\tcaller would end up reading the entire zip file!"
]
}
]
}
] |
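The JarLoader comment above explains why getResource copies the jar entry's bytes into a local array and hands the caller a stream over that copy. A small sketch of such a copy utility, along the lines of what the commit says was added to AccessibleByteArrayOutputStream (hypothetical method name):

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

final class CopyResourceSketch {
    /**
     * Reads the remainder of in into memory and returns an independent stream
     * over the copy, so the caller never touches the underlying jar stream.
     */
    static InputStream copyToLocalStream(InputStream in) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buffer = new byte[8192];
        int read;
        while ((read = in.read(buffer)) != -1) {
            out.write(buffer, 0, read);
        }
        // The original (e.g. a JarInputStream positioned at the entry) can now
        // be closed by the loader; the caller only sees the detached copy.
        return new ByteArrayInputStream(out.toByteArray());
    }
}
```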
derby-DERBY-5521-81923add
|
DERBY-5521 JDBCMBeanTest#testAttributeDriverLevel uses Java assert in lieu of JUnit assert: no real testing happens
Patch derby-5521b which fixes this issue.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1211266 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5526-c8afd4ff
|
DERBY-5526 on upgrade from 10.5 to 10.8.2, getting ERROR XBM0A: The database directory 'C:\cygwin\home\debugfat\clientdb' exists.
However, it does not contain the expected 'service.properties' file.
Change to only throw this error if there is a seg0 directory, so that it looks
like a partially created database.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1212541 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5530-f74cf7a6
|
DERBY-5530: SQLChar.getCollationKey NPE in index-stat-thread
Propagate collation information to the new indexes on TRUNCATE TABLE.
Patch file: derby-5530-1a-propagate_collation_info.diff (trivially modified)
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1243878 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java",
"hunks": [
{
"added": [],
"header": "@@ -86,7 +86,6 @@ import org.apache.derby.iapi.types.DataValueDescriptor;",
"removed": [
"import org.apache.derby.impl.sql.catalog.DDColumnDependableFinder;"
]
},
{
"added": [
" IndexRowGenerator curIndex = compressIRGs[index];",
" indexRows[index] = curIndex.getIndexRowTemplate();",
" curIndex.getIndexRow(emptyHeapRow, "
],
"header": "@@ -2367,9 +2366,10 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": [
"\t\t\t\tindexRows[index] = compressIRGs[index].getIndexRowTemplate();",
"\t\t\t\tcompressIRGs[index].getIndexRow(emptyHeapRow, "
]
}
]
}
] |
derby-DERBY-5531-66ed1016
|
DERBY-5531 Assert failure when inserting NULL into indexed column with territory-based collation
At bind time of the insert node, we go through the individual columns in the insert statement to determine whether there is a need for normalization. If the insert result set has all null values, we conclude that we do not need normalization. At execution time, as part of normalization, the data type conversion from datatypes like SQLVarchar to CollatorSQLVarchar happens if we are dealing with a territory-based database. But this never happens if the insert result set has all null values, and hence a later assert check in OpenBTree fails because the datatypes do not match (in the example given in this jira, the SQLVarchar and CollatorSQLVarchar mismatch at runtime causes an assert failure). Here is the relevant code in InsertNode.bindStatement()
/* Insert a NormalizeResultSetNode above the source if the source
* and target column types and lengths do not match.
*/
if (! resultColumnList.columnTypesAndLengthsMatch(
resultSet.getResultColumns()))
{
resultSet =
(ResultSetNode) getNodeFactory().getNode(
C_NodeTypes.NORMALIZE_RESULT_SET_NODE, resultSet,
resultColumnList, null, Boolean.FALSE,
getContextManager());
}
To fix this problem, we are changing OpenBTree.isIndexableRowConsistent to not throw an assert failure when dealing with null values. If the value is null, then we do not assert on the column datatypes. As it is, this method is meaningful only in sane mode. Insane jars do not give any error for the test case provided in the jira because the assertion only happens with sane jars.
In short, the fix is: if the row column's value is null, then don't worry about the data type match at execution time.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1378206 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5534-841c8990
|
DERBY-5546 ResultSet#updateBigDecimal on a REAL column does not do underflow checking
Patch derby-5546-2. For both Real and Double, check for underflow. For
Double, underflow is currently detected, but only because we haven't fixed
DERBY-3398 yet, so we introduce the same check now as for Real. Once
DERBY-3398 is fixed, that check will no longer be redundant. The tests are still guarded
by a check for embedded until DERBY-5534 is fixed.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1447996 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/SQLDouble.java",
"hunks": [
{
"added": [
"import java.math.BigDecimal;"
],
"header": "@@ -36,6 +36,7 @@ import java.io.ObjectOutput;",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/types/SQLReal.java",
"hunks": [
{
"added": [
"import java.math.BigDecimal;"
],
"header": "@@ -36,6 +36,7 @@ import java.io.ObjectOutput;",
"removed": []
}
]
}
] |
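The commit above adds an underflow check when a BigDecimal is narrowed to a REAL (and the equivalent for DOUBLE). The usual symptom is that a non-zero BigDecimal collapses to 0.0f; a sketch of that kind of check (hypothetical method, not the actual SQLReal code):

```java
import java.math.BigDecimal;
import java.sql.SQLException;

final class RealRangeCheckSketch {
    /** Converts to float, rejecting values that overflow or silently underflow REAL. */
    static float toReal(BigDecimal value) throws SQLException {
        float f = value.floatValue();
        if (Float.isInfinite(f)) {
            throw new SQLException("value out of range for REAL");
        }
        if (f == 0.0f && value.signum() != 0) {
            // Non-zero input became zero: the magnitude is below the smallest
            // representable REAL, i.e. an underflow rather than a real zero.
            throw new SQLException("value out of range for REAL");
        }
        return f;
    }
}
```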
derby-DERBY-5535-2d622c69
|
DERBY-5535 Remove unused methods from client's CrossConverters class
Patch derby-5535 removes the unused methods.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1214423 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/CrossConverters.java",
"hunks": [
{
"added": [],
"header": "@@ -973,10 +973,6 @@ final class CrossConverters {",
"removed": [
" final boolean getBooleanFromBigDecimal(java.math.BigDecimal source) throws SqlException {",
" return source.intValue() != 0;",
" }",
""
]
},
{
"added": [],
"header": "@@ -1046,14 +1042,6 @@ final class CrossConverters {",
"removed": [
" final byte getByteFromBigDecimal(java.math.BigDecimal source) throws SqlException {",
" if (Configuration.rangeCheckCrossConverters &&",
" (source.compareTo(bdMaxByteValue__) == 1 || source.compareTo(bdMinByteValue__) == -1)) {",
" throw new LossOfPrecisionConversionException(agent_.logWriter_, String.valueOf(source));",
" }",
" return (byte) source.intValue();",
" }",
""
]
},
{
"added": [],
"header": "@@ -1106,14 +1094,6 @@ final class CrossConverters {",
"removed": [
" final short getShortFromBigDecimal(java.math.BigDecimal source) throws SqlException {",
" if (Configuration.rangeCheckCrossConverters &&",
" (source.compareTo(bdMaxShortValue__) == 1 || source.compareTo(bdMinShortValue__) == -1)) {",
" throw new LossOfPrecisionConversionException(agent_.logWriter_, String.valueOf(source));",
" }",
" return (short) source.intValue();",
" }",
""
]
},
{
"added": [],
"header": "@@ -1158,14 +1138,6 @@ final class CrossConverters {",
"removed": [
" final int getIntFromBigDecimal(java.math.BigDecimal source) throws SqlException {",
" if (Configuration.rangeCheckCrossConverters &&",
" (source.compareTo(bdMaxIntValue__) == 1 || source.compareTo(bdMinIntValue__) == -1)) {",
" throw new LossOfPrecisionConversionException(agent_.logWriter_, String.valueOf(source));",
" }",
" return source.intValue();",
" }",
""
]
},
{
"added": [],
"header": "@@ -1200,14 +1172,6 @@ final class CrossConverters {",
"removed": [
" final long getLongFromBigDecimal(java.math.BigDecimal source) throws SqlException {",
" if (Configuration.rangeCheckCrossConverters &&",
" (source.compareTo(bdMaxLongValue__) == 1 || source.compareTo(bdMinLongValue__) == -1)) {",
" throw new LossOfPrecisionConversionException(agent_.logWriter_, String.valueOf(source));",
" }",
" return source.longValue();",
" }",
""
]
},
{
"added": [],
"header": "@@ -1233,14 +1197,6 @@ final class CrossConverters {",
"removed": [
" final float getFloatFromBigDecimal(java.math.BigDecimal source) throws SqlException {",
" if (Configuration.rangeCheckCrossConverters &&",
" (source.compareTo(bdMaxFloatValue__) == 1 || source.compareTo(bdMinFloatValue__) == -1)) {",
" throw new LossOfPrecisionConversionException(agent_.logWriter_, String.valueOf(source));",
" }",
" return source.floatValue();",
" }",
""
]
},
{
"added": [],
"header": "@@ -1257,14 +1213,6 @@ final class CrossConverters {",
"removed": [
" final double getDoubleFromBigDecimal(java.math.BigDecimal source) throws SqlException {",
" if (Configuration.rangeCheckCrossConverters &&",
" (source.compareTo(bdMaxDoubleValue__) == 1 || source.compareTo(bdMinDoubleValue__) == -1)) {",
" throw new LossOfPrecisionConversionException(agent_.logWriter_, String.valueOf(source));",
" }",
" return source.doubleValue();",
" }",
""
]
}
]
}
] |
derby-DERBY-5536-34197480
|
DERBY-5536 Client's ResultSet#getLong does not range check when converting from a DECIMAL column
Patch derby-5536-3, which fixes this issue and adds new tests. It
changes the implementation of am.Decimal#getLong to allow it to detect
overflow. If the number of digits indicates it can't happen, we use an
optimized path.
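A hedged standalone sketch of that approach (not the client's am.Decimal code; class and method names are illustrative): a cheap precision test selects the optimized path, and the fallback conversion detects overflow.

import java.math.BigDecimal;
import java.math.RoundingMode;

public class DecimalToLongSketch {
    static long getLong(BigDecimal value) {
        BigDecimal truncated = value.setScale(0, RoundingMode.DOWN);
        // Long.MAX_VALUE has 19 digits, so 18 or fewer integer digits can
        // never overflow: take the optimized path.
        if (truncated.precision() <= 18) {
            return truncated.longValue();
        }
        // Otherwise let BigDecimal raise ArithmeticException on overflow.
        return truncated.longValueExact();
    }

    public static void main(String[] args) {
        System.out.println(getLong(new BigDecimal("123456.789")));          // 123456
        System.out.println(getLong(new BigDecimal("9223372036854775807"))); // max long
        try {
            getLong(new BigDecimal("9223372036854775808"));                 // overflow
        } catch (ArithmeticException expected) {
            System.out.println("out of range for BIGINT/long");
        }
    }
}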
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1229082 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/Cursor.java",
"hunks": [
{
"added": [
" private final long getLongFromDECIMAL(int column, String targetType) ",
" throws SqlException {",
" } catch (ArithmeticException e) {",
" throw new SqlException(agent_.logWriter_,",
" new ClientMessageId (SQLState.LANG_OUTSIDE_RANGE_FOR_DATATYPE),",
" targetType, e);",
" targetType, e);",
" \"DECIMAL\", targetType, e);"
],
"header": "@@ -392,20 +392,25 @@ public abstract class Cursor {",
"removed": [
" private final long getLongFromDECIMAL(int column) throws SqlException {",
" \"long\", e);",
" \"DECIMAL\", \"long\", e);"
]
},
{
"added": [
" return agent_.crossConverters_.getBooleanFromLong(",
" getLongFromDECIMAL(column, \"boolean\"));"
],
"header": "@@ -739,7 +744,8 @@ public abstract class Cursor {",
"removed": [
" return agent_.crossConverters_.getBooleanFromLong(getLongFromDECIMAL(column));"
]
},
{
"added": [
" return agent_.crossConverters_.getByteFromLong(",
" getLongFromDECIMAL(column, \"byte\"));"
],
"header": "@@ -767,7 +773,8 @@ public abstract class Cursor {",
"removed": [
" return agent_.crossConverters_.getByteFromLong(getLongFromDECIMAL(column));"
]
},
{
"added": [
" return agent_.crossConverters_.getShortFromLong(",
" getLongFromDECIMAL(column, \"short\"));"
],
"header": "@@ -794,7 +801,8 @@ public abstract class Cursor {",
"removed": [
" return agent_.crossConverters_.getShortFromLong(getLongFromDECIMAL(column));"
]
},
{
"added": [
" return agent_.crossConverters_.getIntFromLong(",
" getLongFromDECIMAL(column, \"int\"));"
],
"header": "@@ -821,7 +829,8 @@ public abstract class Cursor {",
"removed": [
" return agent_.crossConverters_.getIntFromLong(getLongFromDECIMAL(column));"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/am/Decimal.java",
"hunks": [
{
"added": [
"import java.math.BigDecimal;"
],
"header": "@@ -20,6 +20,7 @@",
"removed": []
},
{
"added": [
" * @throws ArithmeticException if value is too large for a long"
],
"header": "@@ -320,6 +321,7 @@ public class Decimal {",
"removed": []
}
]
}
] |
derby-DERBY-5536-b40d7705
|
DERBY-5536 Client's ResultSet#getLong does not range check when converting from a DECIMAL column
Followup patch "derby-5536-refactor" factors out the test cases for
DERBY-5536 into a separate fixture, adding "ORDER BY" to ensure
correct row retrieval order.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1229481 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5539-1ede0a8c
|
DERBY-5539: Harden password hashing in the builtin authentication service
Add random salt before hashing the credentials, and apply the hash
function multiple times.
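The following standalone sketch shows salted, iterated hashing in the spirit of this change; it is not the Derby implementation, and the algorithm name, salt length and iteration count are illustrative assumptions.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;

public class KeyStretchingSketch {
    static byte[] stretch(String user, String password, byte[] salt,
                          int iterations, String algorithm)
            throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance(algorithm);
        byte[] digest = null;
        for (int i = 0; i < iterations; i++) {
            md.reset();
            if (digest != null) {
                md.update(digest);                           // chain the previous round
            }
            md.update(user.getBytes(StandardCharsets.UTF_8));
            md.update(password.getBytes(StandardCharsets.UTF_8));
            md.update(salt);                                 // random salt per user
            digest = md.digest();
        }
        return digest;
    }

    public static void main(String[] args) throws Exception {
        byte[] salt = new byte[16];
        new SecureRandom().nextBytes(salt);
        byte[] token = stretch("app", "secret", salt, 1000, "SHA-256");
        System.out.println(token.length + "-byte token");
    }
}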
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1221666 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/authentication/AuthenticationServiceBase.java",
"hunks": [
{
"added": [
"import java.security.SecureRandom;"
],
"header": "@@ -55,6 +55,7 @@ import java.security.NoSuchAlgorithmException;",
"removed": []
},
{
"added": [
" /**",
" * Pattern that is prefixed to the stored password in the configurable",
" * hash authentication scheme if key stretching has been applied. This",
" * scheme extends the configurable hash scheme by adding a random salt and",
" * applying the hash function multiple times when generating the hashed",
" * token.",
" */",
" public static final String",
" ID_PATTERN_CONFIGURABLE_STRETCHED_SCHEME = \"3b62\";",
""
],
"header": "@@ -143,6 +144,16 @@ public abstract class AuthenticationServiceBase",
"removed": []
},
{
"added": [
" * Hash credentials using the specified hash algorithm, possibly performing",
" * key stretching by adding random salt and applying the hash function",
" * multiple times.",
" * </p>",
" *",
" * <p>",
" * The algorithm must be supported by one of the registered security",
" * providers in the JVM."
],
"header": "@@ -520,9 +531,14 @@ public abstract class AuthenticationServiceBase",
"removed": [
" * Encrypt a password using the specified hash algorithm and with the",
" * user name as extra salt. The algorithm must be supported by one of",
" * the registered security providers in the JVM."
]
},
{
"added": [
" * @param salt random salt to add to the credentials (possibly {@code null})",
" * @param iterations the number of times to apply the hash function",
" String user, String password, String algorithm,",
" byte[] salt, int iterations)",
" byte[] userBytes;",
" byte[] passwordBytes;",
" try {",
" userBytes = user.getBytes(ENCODING);",
" passwordBytes = password.getBytes(ENCODING);",
" } catch (UnsupportedEncodingException uee) {",
" // UTF-8 should always be available, so this should never happen.",
" throw StandardException.plainWrapException(uee);",
" }",
""
],
"header": "@@ -532,18 +548,31 @@ public abstract class AuthenticationServiceBase",
"removed": [
" String user, String password, String algorithm)"
]
},
{
"added": [
" byte[] digest = null;",
" for (int i = 0; i < iterations; i++) {",
" md.reset();",
" if (digest != null) {",
" md.update(digest);",
" }",
" md.update(userBytes);",
" md.update(passwordBytes);",
" if (salt != null) {",
" md.update(salt);",
" }",
" digest = md.digest();",
" if ((salt == null || salt.length == 0) && iterations == 1) {",
" // No salt was used, and only a single iteration, which is",
" // identical to the default hashing scheme in 10.6-10.8. Generate",
" // a token on a format compatible with those old versions.",
" return ID_PATTERN_CONFIGURABLE_HASH_SCHEME +",
" } else {",
" // Salt and/or multiple iterations was used, so we need to add",
" // those parameters to the token in order to verify the credentials",
" // later.",
" return ID_PATTERN_CONFIGURABLE_STRETCHED_SCHEME +",
" StringUtil.toHexString(digest, 0, digest.length) +",
" SEPARATOR_CHAR + StringUtil.toHexString(salt, 0, salt.length) +",
" SEPARATOR_CHAR + iterations + SEPARATOR_CHAR + algorithm;",
" }"
],
"header": "@@ -552,21 +581,36 @@ public abstract class AuthenticationServiceBase",
"removed": [
" md.reset();",
"",
" try {",
" md.update(user.getBytes(ENCODING));",
" md.update(password.getBytes(ENCODING));",
" } catch (UnsupportedEncodingException uee) {",
" // UTF-8 should always be available, so this should never happen.",
" throw StandardException.plainWrapException(uee);",
" byte[] digest = md.digest();",
"",
" return ID_PATTERN_CONFIGURABLE_HASH_SCHEME +"
]
},
{
"added": [
" DataDictionary dd = getDataDictionary();",
" dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_6, null);",
"",
" // Support for key stretching was added in Derby 10.9, so don't use it",
" // if the database may still be used with an older version.",
" boolean supportKeyStretching =",
" dd.checkVersion(DataDictionary.DD_VERSION_DERBY_10_9, null);"
],
"header": "@@ -596,14 +640,19 @@ public abstract class AuthenticationServiceBase",
"removed": [
" getDataDictionary().checkVersion(",
" DataDictionary.DD_VERSION_DERBY_10_6, null);"
]
}
]
}
] |
derby-DERBY-5539-dcd69d38
|
DERBY-5539: Harden password hashing in the builtin authentication service
Always generate a hashed token, even if there is no user with the
specified user name. This way, authentication failures take the same
amount of time regardless of the user's existence, which will make it
harder for attackers to tell whether a user exists.
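A self-contained sketch of this timing-equalization idea (hypothetical class and method names, not Derby's authentication service): hash the supplied credentials even when the user is unknown, so failed logins cost about the same either way.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.HashMap;
import java.util.Map;

public class EqualTimeAuthSketch {
    private final Map<String, byte[]> storedTokens = new HashMap<>();

    boolean authenticate(String user, String password) throws Exception {
        byte[] candidate = hash(user, password);  // always do the expensive work
        byte[] stored = storedTokens.get(user);   // null if the user is unknown
        return stored != null && MessageDigest.isEqual(stored, candidate);
    }

    private static byte[] hash(String user, String password) throws Exception {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        byte[] digest = (user + ":" + password).getBytes(StandardCharsets.UTF_8);
        for (int i = 0; i < 10000; i++) {         // iteration count is illustrative
            digest = md.digest(digest);
        }
        return digest;
    }

    public static void main(String[] args) throws Exception {
        EqualTimeAuthSketch auth = new EqualTimeAuthSketch();
        auth.storedTokens.put("alice", hash("alice", "secret"));
        System.out.println(auth.authenticate("alice", "secret"));  // true
        System.out.println(auth.authenticate("nobody", "secret")); // false, same cost
    }
}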
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1292704 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/authentication/AuthenticationServiceBase.java",
"hunks": [
{
"added": [
" /**",
" * Get all the database properties.",
" * @return the database properties, or {@code null} if there is no",
" * access factory",
" */",
" Properties getDatabaseProperties() throws StandardException {",
" Properties props = null;",
"",
" TransactionController tc = getTransaction();",
" if (tc != null) {",
" try {",
" props = tc.getProperties();",
" } finally {",
" tc.commit();",
" }",
" }",
"",
" return props;",
" }",
""
],
"header": "@@ -309,6 +309,26 @@ public abstract class AuthenticationServiceBase",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/authentication/BasicAuthenticationServiceImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.store.access.TransactionController;"
],
"header": "@@ -27,7 +27,7 @@ import org.apache.derby.iapi.reference.SQLState;",
"removed": [
"import org.apache.derby.iapi.services.sanity.SanityManager;"
]
},
{
"added": [
" // DERBY-5539: Generate a hashed token even if the user is not",
" // defined at the database level (that is, the user is defined at",
" // the system level or does not exist at all). If we don't do that,",
" // authentication failures would take less time for non-existing",
" // users than they would for existing users, since generating the",
" // hashed token is a relatively expensive operation. Attackers",
" // could use this to determine if a user exists. By generating the",
" // hashed token also for non-existing users, authentication",
" // failures will take the same time for existing and non-existing",
" // users, and it will be more difficult for attackers to tell the",
" // difference.",
" try {",
" Properties props = getDatabaseProperties();",
" if (props != null) {",
" encryptUsingDefaultAlgorithm(",
" userName, userPassword, props);",
" }",
" } catch (StandardException se) {",
" throw Util.generateCsSQLException(se);",
" }",
""
],
"header": "@@ -221,6 +221,27 @@ public final class BasicAuthenticationServiceImpl",
"removed": []
}
]
}
] |
derby-DERBY-5540-379e4cc2
|
DERBY-5540: Call initCause() and getCause() without reflection in BaseJDBCTestCase
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1220669 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5541-c14b3d44
|
DERBY-5541: Remove unnecessary field rwsOK in DirStorageFactory4
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1220670 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/io/DirFile4.java",
"hunks": [
{
"added": [
" DirFile4(String path)"
],
"header": "@@ -50,17 +50,14 @@ class DirFile4 extends DirFile",
"removed": [
"\tprivate final boolean rwsOK;",
"",
" DirFile4( String path, boolean rwsOK)",
"\t\tthis.rwsOK = rwsOK;"
]
},
{
"added": [
" DirFile4(String directoryName, String fileName)"
],
"header": "@@ -69,10 +66,9 @@ class DirFile4 extends DirFile",
"removed": [
" DirFile4( String directoryName, String fileName, boolean rwsOK)",
"\t\tthis.rwsOK = rwsOK;"
]
},
{
"added": [
" DirFile4( DirFile directoryName, String fileName)"
],
"header": "@@ -81,10 +77,9 @@ class DirFile4 extends DirFile",
"removed": [
" DirFile4( DirFile directoryName, String fileName, boolean rwsOK)",
"\t\tthis.rwsOK = rwsOK;"
]
},
{
"added": [
" return new DirFile4(parent);"
],
"header": "@@ -98,7 +93,7 @@ class DirFile4 extends DirFile",
"removed": [
" return new DirFile4( parent, rwsOK);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/io/DirStorageFactory4.java",
"hunks": [
{
"added": [],
"header": "@@ -22,9 +22,6 @@",
"removed": [
"import org.apache.derby.iapi.services.info.JVMInfo;",
"",
"import java.io.IOException;"
]
},
{
"added": [],
"header": "@@ -33,8 +30,6 @@ import java.io.IOException;",
"removed": [
"\tprivate static final boolean\trwsOK = JVMInfo.JDK_ID >= JVMInfo.J2SE_142;",
" "
]
},
{
"added": [
" return new DirFile4(dataDirectory);",
" return new DirFile4(dataDirectory, path);"
],
"header": "@@ -54,8 +49,8 @@ public class DirStorageFactory4 extends DirStorageFactory",
"removed": [
" return new DirFile4(dataDirectory, rwsOK);",
" return new DirFile4(dataDirectory, path, rwsOK);"
]
},
{
"added": [
" return new DirFile4( separatedDataDirectory + directoryName, fileName);"
],
"header": "@@ -69,7 +64,7 @@ public class DirStorageFactory4 extends DirStorageFactory",
"removed": [
" return new DirFile4( separatedDataDirectory + directoryName, fileName, rwsOK);"
]
},
{
"added": [
" return new DirFile4((DirFile) directoryName, fileName);"
],
"header": "@@ -83,7 +78,7 @@ public class DirStorageFactory4 extends DirStorageFactory",
"removed": [
" return new DirFile4( (DirFile) directoryName, fileName, rwsOK);"
]
},
{
"added": [
" return true;"
],
"header": "@@ -99,7 +94,7 @@ public class DirStorageFactory4 extends DirStorageFactory",
"removed": [
" return rwsOK;"
]
}
]
}
] |
derby-DERBY-5542-4e1ac797
|
DERBY-5542: Remove checks for Java version being greater than or equal to 1.4
No older versions are supported.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1220671 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/jdbc/EmbedXAResource.java",
"hunks": [
{
"added": [],
"header": "@@ -36,7 +36,6 @@ import org.apache.derby.iapi.jdbc.ResourceAdapter;",
"removed": [
"import org.apache.derby.iapi.services.info.JVMInfo;"
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/functionTests/util/corruptio/CorruptDiskStorageFactory.java",
"hunks": [
{
"added": [
"\t\tString dirStorageFactoryClass ="
],
"header": "@@ -68,17 +68,8 @@ public class CorruptDiskStorageFactory extends CorruptBaseStorageFactory",
"removed": [
"\t\tString dirStorageFactoryClass;",
"\t\tif( JVMInfo.JDK_ID >= JVMInfo.J2SE_14)",
" {",
" dirStorageFactoryClass = ",
" }",
" else",
" {",
" dirStorageFactoryClass = ",
" \"org.apache.derby.impl.io.DirStorageFactory\";",
" }"
]
}
]
}
] |
derby-DERBY-5546-841c8990
|
DERBY-5546 ResultSet#updateBigDecimal on a REAL column does not do underflow checking
Patch derby-5546-2. For both Real and Double, check for underflow. For
Double, underflow is currently detected, but only because we didn't fix
DERBY-3398 yet, so we introduce the same check now as for Real. Once
DERBY-3398 is fixed, it will no longer be redundant. The tests are still guarded
by a check for embedded until DERBY-5534 is fixed.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1447996 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/SQLDouble.java",
"hunks": [
{
"added": [
"import java.math.BigDecimal;"
],
"header": "@@ -36,6 +36,7 @@ import java.io.ObjectOutput;",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/types/SQLReal.java",
"hunks": [
{
"added": [
"import java.math.BigDecimal;"
],
"header": "@@ -36,6 +36,7 @@ import java.io.ObjectOutput;",
"removed": []
}
]
}
] |
derby-DERBY-5547-65246387
|
DERBY-5547: NSSecurityMechanismTest.testNetworkServerSecurityMechanism() fails intermittently
Wait for previous network server to stop before starting a new one.
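For illustration, a standalone sketch of waiting for a TCP port to become available (not the NetworkServerTestSetup code; port number and timeout are assumptions): keep trying to bind until it succeeds or a deadline passes.

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;

public class WaitForPortSketch {
    static void waitForAvailablePort(int port, InetAddress addr, long timeoutMillis)
            throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (true) {
            try (ServerSocket probe = new ServerSocket(port, 0, addr)) {
                return;                           // bind succeeded: port is free
            } catch (IOException bindFailed) {
                if (System.currentTimeMillis() >= deadline) {
                    throw new IOException("port " + port + " still in use", bindFailed);
                }
                Thread.sleep(100);                // previous server not fully stopped yet
            }
        }
    }

    public static void main(String[] args) throws Exception {
        waitForAvailablePort(1527, InetAddress.getByName("localhost"), 10000);
        System.out.println("port 1527 is available");
    }
}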
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1222155 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/NetworkServerTestSetup.java",
"hunks": [
{
"added": [
"import java.net.UnknownHostException;"
],
"header": "@@ -26,6 +26,7 @@ import java.io.FileOutputStream;",
"removed": []
},
{
"added": [
" * @throws AssertionFailedError if the port didn't become available before",
" * the timeout",
" * @throws InterruptedException if the thread was interrupted while waiting",
" * for the port to become available",
" * @throws UnknownHostException if the host name couldn't be resolved",
" public static void waitForAvailablePort()",
" throws InterruptedException, UnknownHostException {"
],
"header": "@@ -217,9 +218,14 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" * @throws Exception if the port didn't become available before the timeout",
" private void waitForAvailablePort() throws Exception {"
]
},
{
"added": [
" private static void probeServerPort(final int port, final InetAddress addr)"
],
"header": "@@ -248,7 +254,7 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" private void probeServerPort(final int port, final InetAddress addr)"
]
}
]
}
] |
derby-DERBY-5552-3fe22817
|
DERBY-5552 Derby threads hanging when using ClientXADataSource and a deadlock or lock timeout occurs
Code patch contributed by Brett Bergquist - brett at thebergquistfamily dot com
Derby will no longer null out the connection on lock timeout or deadlock.
I added a test to verify proper behavior for the connection and transaction
state after the lock timeout occurs.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1293494 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/Utilities.java",
"hunks": [
{
"added": [
" } ",
" System.out.println(\"};\\n\");"
],
"header": "@@ -137,10 +137,9 @@ public class Utilities {",
"removed": [
" } else {",
" System.out.println(\"};\\n\");",
" }"
]
}
]
}
] |
derby-DERBY-5553-537fdabe
|
DERBY-5553 System property for client tracing -Dderby.client.traceDirectory does not work with XADataSource
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1518766 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/jdbc/ClientBaseDataSourceRoot.java",
"hunks": [
{
"added": [
" if (traceDirectoryString == null && properties != null) {"
],
"header": "@@ -425,7 +425,7 @@ public abstract class ClientBaseDataSourceRoot implements",
"removed": [
" if (traceDirectoryString == null) {"
]
},
{
"added": [
" if (traceLevelString == null && properties != null) {",
" if (traceLevelString != null ) {",
" return parseInt(traceLevelString, propertyDefault_traceLevel);",
" } else {",
" return propertyDefault_traceLevel;",
" }"
],
"header": "@@ -989,12 +989,15 @@ public abstract class ClientBaseDataSourceRoot implements",
"removed": [
" if (traceLevelString == null) {",
"",
" return parseInt(traceLevelString, propertyDefault_traceLevel);"
]
},
{
"added": [
" // DERBY-5553. System properties derby.client.traceDirectory",
" // and derby.client.traceLevel do not work for ClientXADataSource",
" // or ClientConnectionPoolDataSource",
" // Trace level and trace directory will be read from system",
" // properties if they are not specified in the Properties",
" // argument, so we check for them first to avoid getting cut",
" // off by the (prop == null) check below.",
" String traceDir = getTraceDirectory(prop);",
" if (traceDir != null) {",
" setTraceDirectory(traceDir);",
" }",
" ",
" int traceLevel = getTraceLevel(prop);",
" if (traceLevel != propertyDefault_traceLevel) {",
" setTraceLevel(traceLevel);",
" }"
],
"header": "@@ -1060,6 +1063,22 @@ public abstract class ClientBaseDataSourceRoot implements",
"removed": []
}
]
}
] |
derby-DERBY-5554-01c7acbe
|
DERBY-5554: Add a trailing tableid column to the SPACE_TABLE vti.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1351714 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/diag/SpaceTable.java",
"hunks": [
{
"added": [
"\t<LI>TABLEID char(36) - not nullable. The UUID of the table.</LI>"
],
"header": "@@ -91,6 +91,7 @@ import org.apache.derby.vti.VTIEnvironment;",
"removed": []
},
{
"added": [
" {",
" conglomTable[i] = new ConglomInfo",
" (",
" cds[i].getTableID().toString(),",
" cds[i].getConglomerateNumber(),",
" cds[i].isIndex() ? cds[i].getConglomerateName() : tableName,",
" cds[i].isIndex()",
" );",
" }"
],
"header": "@@ -147,10 +148,15 @@ public class SpaceTable extends VTITemplate implements VTICosting {",
"removed": [
" conglomTable[i] = new ConglomInfo(",
" cds[i].getConglomerateNumber(),",
" cds[i].isIndex() ? cds[i].getConglomerateName() : tableName,",
" cds[i].isIndex());"
]
},
{
"added": [
" String str = null;",
" ",
"\t\tswitch( columnNumber )",
"\t\t{",
"\t\t case 1:",
"\t\t\t str = conglomInfo.getConglomName();",
" break;",
" \t\tcase 8:",
"\t\t\t str = conglomInfo.getTableID();",
" break;",
"\t\t default:",
"\t\t\t break;",
"\t\t}"
],
"header": "@@ -227,7 +233,19 @@ public class SpaceTable extends VTITemplate implements VTICosting {",
"removed": [
"\t\tString str = conglomInfo.getConglomName();"
]
},
{
"added": [
"\t\tEmbedResultSetMetaData.getResultColumnDescriptor(\"TABLEID\", Types.CHAR, false, 36),"
],
"header": "@@ -335,6 +353,7 @@ public class SpaceTable extends VTITemplate implements VTICosting {",
"removed": []
}
]
}
] |
derby-DERBY-5554-271e7dd1
|
DERBY-5554: Add 0-arg constructor to SPACE_TABLE vti.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1351795 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/diag/SpaceTable.java",
"hunks": [
{
"added": [
" public SpaceTable() {}"
],
"header": "@@ -116,6 +116,7 @@ public class SpaceTable extends VTITemplate implements VTICosting {",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java",
"hunks": [
{
"added": [
"\t * index, the size of the return array is 1. If the uuid argument is null, then",
" * this method retrieves descriptors for all of the conglomerates in the database."
],
"header": "@@ -7033,7 +7033,8 @@ public final class\tDataDictionaryImpl",
"removed": [
"\t * index, the size of the return array is 1."
]
}
]
}
] |
derby-DERBY-5554-3c065e9b
|
DERBY-5554: Forbid the joining of VTIs to one another in the FROM list.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1357692 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5554-60dc440d
|
DERBY-5554: Allow multiple tables in FROM lists which join tables to VTI arguments.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1360306 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromVTI.java",
"hunks": [
{
"added": [
" // for remapping column references in VTI args at code generation time",
" private HashMap argSources = new HashMap();",
""
],
"header": "@@ -117,6 +117,9 @@ public class FromVTI extends FromTable implements VTIEnvironment",
"removed": []
},
{
"added": [
"",
" // remember this FromTable so that we can code generate the arg",
" // from actual result columns later on.",
" argSources.put( new Integer( fromTable.getTableNumber() ), fromTable );",
" "
],
"header": "@@ -885,13 +888,17 @@ public class FromVTI extends FromTable implements VTIEnvironment",
"removed": [
" "
]
},
{
"added": [
" remapBaseTableColumns();"
],
"header": "@@ -1503,6 +1510,7 @@ public class FromVTI extends FromTable implements VTIEnvironment",
"removed": []
}
]
}
] |
derby-DERBY-5559-5e1c7a74
|
DERBY-5559 AssertFailures (7, or 8) with ibm 1.6 and 1.5 on Windows XP in lang.NativeAuthProcs fixture testAll
This change just adds explicit printouts in the tests when the asserts fail;
it does not fix the test failures.
I added them to the 4 places I saw them failing in nightly runs as reported
in the apache nightly test run site for the following report:
http://people.apache.org/~myrnavl/derby_test_results/main/windows/testlog/ibm15/1227449-suites.All_diff.txt
All the problem areas are asserts based on timestamps and the tests seem to
be waiting for some time such that these asserts will be true. My guess is
that this methodology is not portable with the current values and may need to
be tuned more to work across all JVMs and hardware.
I am seeing errors on an XP laptop running IBM JVM 1.6.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1228537 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5559-a1ac6747
|
DERBY-5559 AssertFailures (7, or 8) with ibm 1.6 and 1.5 on Windows XP in lang.NativeAuthProcs fixture testAll
Adding longer waits and some additional waits to fix problems on my laptop with
this test. Before this change the test would expect the timestamp to have
moved forward after some password manipulation operations, but instead the
timestamp would be the same as before. I believe this is just a problem with
the known issue of JVM/machine caching of time, and not an actual problem
with the password mechanism.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1229835 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5560-b4177107
|
DERBY-5560 Java deadlock between LogicalConnection40 and ClientXAConnection40
Patch contributed by Brett Bergquist
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1513218 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/LogicalConnection.java",
"hunks": [
{
"added": [
" public void close() throws SQLException {",
" // The pooledConnection owns this LogicalConnection. To ensure that",
" // there is no deadlock when calling back into the pooledConnection_.recycleConnection",
" // below, we first synchronize on the pooledConnection and then on this",
" // LogicalConnection",
" synchronized (pooledConnection_) {",
" synchronized (this) {",
" try {",
" // we also need to loop thru all the logicalStatements and close them",
" if (physicalConnection_ == null) {",
" return;",
" }",
" if (physicalConnection_.agent_.loggingEnabled()) {",
" physicalConnection_.agent_.logWriter_.traceEntry(this, \"close\");",
" }",
"",
" if (physicalConnection_.isClosed()) // connection is closed or has become stale",
" {",
" pooledConnection_.informListeners(new SqlException(null,",
" new ClientMessageId(",
" SQLState.PHYSICAL_CONNECTION_ALREADY_CLOSED)));",
" } else {",
" physicalConnection_.checkForTransactionInProgress();",
" physicalConnection_.closeForReuse(",
" pooledConnection_.isStatementPoolingEnabled());",
" if (!physicalConnection_.isGlobalPending_()) {",
" pooledConnection_.recycleConnection();",
" }",
" }",
" physicalConnection_ = null;",
" pooledConnection_.nullLogicalConnection();",
" } catch (SqlException se) {",
" throw se.getSQLException();"
],
"header": "@@ -96,36 +96,41 @@ public class LogicalConnection implements Connection {",
"removed": [
" synchronized public void close() throws SQLException {",
" try",
" {",
" // we also need to loop thru all the logicalStatements and close them",
" if (physicalConnection_ == null) {",
" return;",
" }",
" if (physicalConnection_.agent_.loggingEnabled()) {",
" physicalConnection_.agent_.logWriter_.traceEntry(this, \"close\");",
" }",
"",
" if (physicalConnection_.isClosed()) // connection is closed or has become stale",
" {",
" pooledConnection_.informListeners(new SqlException(null, ",
" new ClientMessageId(",
" SQLState.PHYSICAL_CONNECTION_ALREADY_CLOSED)));",
" } else {",
" physicalConnection_.checkForTransactionInProgress();",
" physicalConnection_.closeForReuse(",
" pooledConnection_.isStatementPoolingEnabled());",
" if (!physicalConnection_.isGlobalPending_()) {",
" pooledConnection_.recycleConnection();",
" physicalConnection_ = null;",
" pooledConnection_.nullLogicalConnection();",
" }",
" catch ( SqlException se )",
" {",
" throw se.getSQLException();"
]
}
]
}
] |
derby-DERBY-5561-06a9c554
|
DERBY-5561: Race conditions in LogicalConnection checking for a null physical connection
Changes:
o converted docs to Javadoc
o removed stale doc
o corrected/improved some comments
o made physicalConnection_ package-private
o made checkForNullPhysicalConnection final
Patch file: derby-5561-2a-minor_cleanups.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1334919 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/LogicalConnection.java",
"hunks": [
{
"added": [
"/**",
" * A simple delegation wrapper handle for a physical connection.",
" * <p>",
" * All methods of the {@code Connection} interface are forwarded to the",
" * underlying physical connection, except for {@link #close()} and",
" * {@link #isClosed()}. When a physical connection is wrapped, it is non-null,",
" * when the logical connection is closed, the wrapped physical connection is",
" * always set to {@code null}.",
" * Both the finalizer and the {@code close}-methods will always set the ",
" * physical connection to {@code null}. After the physical connection has been",
" * nulled out, only the {@code PooledConnection} instance will maintain a",
" * handle to the physical connection.",
" */",
" /**",
" * Underlying physical connection for this logical connection.",
" * <p>",
" * Set to {@code null} when this logical connection is closed.",
" */",
" Connection physicalConnection_;"
],
"header": "@@ -24,17 +24,26 @@ import org.apache.derby.shared.common.reference.SQLState;",
"removed": [
"",
"// A simple delegation wrapper handle for a physical connection.",
"// All methods are forwarded to the underlying physical connection except for close() and isClosed().",
"// When a physical connection is wrapped, it is non-null, when the logical connection",
"// is closed, the wrapped physical connection is always set to null.",
"// Both the finalizer and close() methods will always set the physical connection to null.",
"// After the physical conneciton is set to null,",
"// only the Pooled Connection instance will maintain a handle to the physical connection.",
"",
" protected Connection physicalConnection_ = null; // reset to null when the logical connection is closed."
]
},
{
"added": [
" /**",
" * Verifies that there is an underlying physical connection for this",
" * logical connection.",
" * <p>",
" * If the physical connection has been nulled out it means that this",
" * logical connection has been closed.",
" *",
" * @throws SQLException if this logical connection has been closed",
" */",
" protected final void checkForNullPhysicalConnection()",
" throws SQLException {"
],
"header": "@@ -129,10 +138,17 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" // this method doesn't wrap in the standard way, because it went out without a throws clause.",
" // Unlike all other LogicalConnection methods, if the physical connection is null, it won't throw an exception, but will return false.",
"",
" protected void checkForNullPhysicalConnection() throws SQLException {"
]
},
{
"added": [
" * Notifies listeners about exceptions of session level severity or higher.",
" * <p>",
" * The exception, even if the severity is sufficiently high, is ignored if",
" * the underlying physical connection has been nulled out. Otherwise a ",
" * {@code connectionErrorOccurred}-event is sent to all the registered",
" * listeners.",
" * @param sqle the cause of the notification"
],
"header": "@@ -141,14 +157,14 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" * This method checks if the physcial connection underneath is null and",
" * if yes, then it simply returns.",
" * Otherwise, if the severity of exception is greater than equal to",
" * ExceptionSeverity.SESSION_SEVERITY, then we will send ",
" * connectionErrorOccurred event to all the registered listeners.",
" * @param sqle SQLException An event will be sent to the listeners if the",
" * exception's severity is >= ExceptionSeverity.SESSION_SEVERITY."
]
}
]
}
] |
derby-DERBY-5561-5e39436e
|
DERBY-5561: Race conditions in LogicalConnection checking for a null physical connection
Made methods calling checkForNullPhysicalConnection synchronized.
Patch contributed by Brett Bergquist (brett at thebergquistfamily dot com).
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1333305 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/LogicalConnection.java",
"hunks": [
{
"added": [
" synchronized public String nativeSQL(String sql) throws SQLException {"
],
"header": "@@ -197,7 +197,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public String nativeSQL(String sql) throws SQLException {"
]
},
{
"added": [
" synchronized public boolean getAutoCommit() throws SQLException {"
],
"header": "@@ -217,7 +217,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public boolean getAutoCommit() throws SQLException {"
]
},
{
"added": [
" synchronized public int getTransactionIsolation() throws SQLException {"
],
"header": "@@ -257,7 +257,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public int getTransactionIsolation() throws SQLException {"
]
},
{
"added": [
" synchronized public java.sql.SQLWarning getWarnings() throws SQLException {"
],
"header": "@@ -267,7 +267,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.SQLWarning getWarnings() throws SQLException {"
]
},
{
"added": [
" synchronized public boolean isReadOnly() throws SQLException {"
],
"header": "@@ -363,7 +363,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public boolean isReadOnly() throws SQLException {"
]
},
{
"added": [
" synchronized public String getCatalog() throws SQLException {"
],
"header": "@@ -383,7 +383,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public String getCatalog() throws SQLException {"
]
},
{
"added": [
" synchronized public java.util.Map getTypeMap() throws SQLException {"
],
"header": "@@ -428,7 +428,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.util.Map getTypeMap() throws SQLException {"
]
},
{
"added": [
" synchronized public java.sql.Statement createStatement(int resultSetType, int resultSetConcurrency,"
],
"header": "@@ -448,7 +448,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.Statement createStatement(int resultSetType, int resultSetConcurrency,"
]
},
{
"added": [
" synchronized public java.sql.CallableStatement prepareCall(String sql, int resultSetType,"
],
"header": "@@ -459,7 +459,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.CallableStatement prepareCall(String sql, int resultSetType,"
]
},
{
"added": [
" synchronized public java.sql.PreparedStatement prepareStatement(String sql, int resultSetType,"
],
"header": "@@ -471,7 +471,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.PreparedStatement prepareStatement(String sql, int resultSetType,"
]
},
{
"added": [
" synchronized public java.sql.PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)"
],
"header": "@@ -484,7 +484,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.PreparedStatement prepareStatement(String sql, int autoGeneratedKeys)"
]
},
{
"added": [
" synchronized public java.sql.PreparedStatement prepareStatement(String sql, int columnIndexes[])"
],
"header": "@@ -495,7 +495,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.PreparedStatement prepareStatement(String sql, int columnIndexes[])"
]
},
{
"added": [
" synchronized public java.sql.PreparedStatement prepareStatement(String sql, String columnNames[])"
],
"header": "@@ -506,7 +506,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.PreparedStatement prepareStatement(String sql, String columnNames[])"
]
},
{
"added": [
" synchronized public void setHoldability(int holdability) throws SQLException {"
],
"header": "@@ -517,7 +517,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public void setHoldability(int holdability) throws SQLException {"
]
},
{
"added": [
" synchronized public int getHoldability() throws SQLException {"
],
"header": "@@ -527,7 +527,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public int getHoldability() throws SQLException {"
]
},
{
"added": [
" synchronized public java.sql.Savepoint setSavepoint() throws SQLException {"
],
"header": "@@ -537,7 +537,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.Savepoint setSavepoint() throws SQLException {"
]
},
{
"added": [
" synchronized public java.sql.Savepoint setSavepoint(String name) throws SQLException {"
],
"header": "@@ -547,7 +547,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public java.sql.Savepoint setSavepoint(String name) throws SQLException {"
]
},
{
"added": [
" synchronized public void rollback(java.sql.Savepoint savepoint) throws SQLException {"
],
"header": "@@ -557,7 +557,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public void rollback(java.sql.Savepoint savepoint) throws SQLException {"
]
},
{
"added": [
" synchronized public void releaseSavepoint(java.sql.Savepoint savepoint) throws SQLException {"
],
"header": "@@ -567,7 +567,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public void releaseSavepoint(java.sql.Savepoint savepoint) throws SQLException {"
]
},
{
"added": [
" synchronized public String getSchema() throws SQLException"
],
"header": "@@ -610,7 +610,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public String getSchema() throws SQLException"
]
},
{
"added": [
" synchronized public void setSchema( String schemaName ) throws SQLException"
],
"header": "@@ -624,7 +624,7 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" public void setSchema( String schemaName ) throws SQLException"
]
}
]
}
] |
derby-DERBY-5562-152986e7
|
DERBY-5562: A read-only XA transaction that has a timeout never has the timer canceled when the transaction is complete
Cancel the timer when a read-only transaction is prepared and implicitly committed.
Based on a fix contributed by Brett Bergquist.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1230480 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/jdbc/XATransactionState.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.store.access.XATransactionController;"
],
"header": "@@ -33,6 +33,7 @@ import org.apache.derby.iapi.services.context.ContextImpl;",
"removed": []
}
]
}
] |
derby-DERBY-5564-c9ef1664
|
DERBY-5564 Code does different things depending if derby.locks.deadlockTrace=true is set
original patch by Brett Bergquist, then modified by Mike Matrigali for submission.
Changes the code to always return 40XL1 as the SQL state when a lock timeout
occurs. Prior to this change, if deadlock diagnostics were enabled then
40X02 would be returned. Internally, multiple places in the code were not
expecting the second error code for a lock timeout. Also, it was agreed that
it was confusing for user applications to have to code for both states in
case they wanted to turn diagnostics on and off.
Existing test cases were changed to match the new expected behavior.
The behavior in DDLConstantaction to immediately throw an error on first try
if a lock timeout is encountered with diagnostics enabled was preserved. The
error thrown now will be with sql state 40XL1 and not 40X02 as before.
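From the application side, the practical effect is that a lock timeout can now be recognized by the single SQLState 40XL1. A hedged sketch of such a check (the connection and SQL text are assumed to come from elsewhere; this is not Derby code):

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class LockTimeoutHandlingSketch {
    static void runWithTimeoutHandling(Connection conn, String sql) throws SQLException {
        try (Statement s = conn.createStatement()) {
            s.executeUpdate(sql);
        } catch (SQLException e) {
            if ("40XL1".equals(e.getSQLState())) {
                System.out.println("lock timeout - retry or give up");
            } else {
                throw e;                          // some other failure
            }
        }
    }
}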
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1230100 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/dictionary/SPSDescriptor.java",
"hunks": [
{
"added": [
"\t\t\t\t// First try compiling in a nested transaction so we can ",
" // release the locks after the compilation, and not have them",
" // sit around in the parent transaction. But if we get lock ",
" // time out in the nested transaction, then go ahead and do ",
" // the compilation in the user transaction. When doing the ",
" // compilation in the user transaction, the locks acquired for ",
" // recompilation will be released at the end of the user ",
" // transaction (commit or abort)."
],
"header": "@@ -691,11 +691,14 @@ public class SPSDescriptor extends TupleDescriptor",
"removed": [
"\t\t\t\t//bug 4821 - First try compiling on a nested transaction so we can release",
"\t\t\t\t//the locks after the compilation. But if we get lock time out on the",
"\t\t\t\t//nested transaction, then go ahead and do the compilation on the user",
"\t\t\t\t//transaction. When doing the compilation on user transaction, the locks",
"\t\t\t\t//acquired for recompilation will be released at the end of the user transaction."
]
},
{
"added": [
"\t\t\t\t\t// If I cannot start a Nested User Transaction use the ",
" // parent transaction to do all the work."
],
"header": "@@ -711,8 +714,8 @@ public class SPSDescriptor extends TupleDescriptor",
"removed": [
"\t\t\t\t\t// If I cannot start a Nested User Transaction use the parent",
"\t\t\t\t\t// transaction to do all the work."
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/daemon/IndexStatisticsDaemonImpl.java",
"hunks": [
{
"added": [
""
],
"header": "@@ -323,6 +323,7 @@ public class IndexStatisticsDaemonImpl",
"removed": []
},
{
"added": [
"",
" if (se.isLockTimeout() && !lockConflictSeen) {",
"",
""
],
"header": "@@ -330,12 +331,14 @@ public class IndexStatisticsDaemonImpl",
"removed": [
" if (SQLState.LOCK_TIMEOUT.equals(se.getMessageId()) &&",
" !lockConflictSeen) {"
]
},
{
"added": [
"",
"",
" if (se.isLockTimeout() && retries < 3) {"
],
"header": "@@ -508,9 +511,10 @@ public class IndexStatisticsDaemonImpl",
"removed": [
" if (SQLState.LOCK_TIMEOUT.equals(se.getMessageId()) &&",
" retries < 3) {"
]
},
{
"added": [
"",
" if (se.isLockTimeout() && asBackgroundTask && retries < 3) {"
],
"header": "@@ -632,8 +636,8 @@ public class IndexStatisticsDaemonImpl",
"removed": [
" if (SQLState.LOCK_TIMEOUT.equals(se.getMessageId()) &&",
" asBackgroundTask && retries < 3) {"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/access/btree/BTreePostCommit.java",
"hunks": [
{
"added": [
" if (se.isLockTimeoutOrDeadlock())"
],
"header": "@@ -257,8 +257,7 @@ class BTreePostCommit implements Serviceable",
"removed": [
"\t\t\tif (se.getMessageId().equals(SQLState.LOCK_TIMEOUT) ||",
"\t\t\t\tse.getMessageId().equals(SQLState.DEADLOCK))"
]
}
]
}
] |
derby-DERBY-5565-b0902f14
|
DERBY-5565 Network Server should reject client connections that are not Derby Network Client.
We now reject all PRDIDs that do not start with DNC.
The protocol tests have been changed to use the DNC10090 PRDID instead of TST01000. This actually alters the code path where there is slightly different behavior in later derby client versions, so that we have to skip an additional dss on connect for the diagnostic information.
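The essence of the change is a simple product-identifier prefix test. A minimal sketch (constants and names are illustrative, not the actual DRDA code):

public class ClientTypeCheckSketch {
    static final String DERBY_CLIENT_PREFIX = "DNC";

    /** Accept only Derby Network Client product identifiers. */
    static boolean isSupportedClient(String prdid) {
        return prdid != null && prdid.startsWith(DERBY_CLIENT_PREFIX);
    }

    public static void main(String[] args) {
        System.out.println(isSupportedClient("DNC10090")); // true
        System.out.println(isSupportedClient("JCC03640")); // false - rejected
        System.out.println(isSupportedClient("TST01000")); // false - rejected
    }
}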
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1336268 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/AppRequester.java",
"hunks": [
{
"added": [
""
],
"header": "@@ -35,8 +35,7 @@ class AppRequester",
"removed": [
"\tprotected static final int JCC_CLIENT = 1;",
"\tprotected static final int CCC_CLIENT = 2;\t\t// not yet supported."
]
},
{
"added": [
" if ((prdid.indexOf(DRDAConstants.DERBY_DRDA_CLIENT_ID) != -1)) {",
" clientType = DNC_CLIENT;",
" } else {",
" clientType = UNKNOWN_CLIENT;",
" }"
],
"header": "@@ -108,17 +107,11 @@ class AppRequester",
"removed": [
"\t\tif (srvrlslv == null)",
"\t\t{ clientType = UNKNOWN_CLIENT; }",
"\t\telse if (srvrlslv.indexOf(\"JCC\") != -1)",
"\t\t{ clientType = JCC_CLIENT; }",
"\t\telse if",
"\t\t\t(",
"\t\t\t (srvrlslv.indexOf(DRDAConstants.DERBY_DRDA_CLIENT_ID) != -1)",
"\t\t\t)",
"\t\t{ clientType = DNC_CLIENT; }",
"\t\telse",
"\t\t{ clientType = UNKNOWN_CLIENT; }"
]
}
]
},
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
" if (appRequester.getClientType() != appRequester.DNC_CLIENT) {",
" invalidClient(appRequester.prdid);",
" }",
" // All versions of DNC,the only client supported, handle",
" // warnings on CNTQRY",
" sendWarningsOnCNTQRY = true;"
],
"header": "@@ -3424,16 +3424,12 @@ class DRDAConnThread extends Thread {",
"removed": [
"",
"\t\t\t\t\t/* If JCC version is 1.5 or later, send SQLWarning on CNTQRY */",
"\t\t\t\t\tif (((appRequester.getClientType() == appRequester.JCC_CLIENT) &&",
"\t\t\t\t\t\t(appRequester.greaterThanOrEqualTo(1, 5, 0))) ||",
"\t\t\t\t\t (appRequester.getClientType() == appRequester.DNC_CLIENT))",
"\t\t\t\t\t{",
"\t\t\t\t\t\tsendWarningsOnCNTQRY = true;",
"\t\t\t\t\t}",
"\t\t\t\t\telse sendWarningsOnCNTQRY = false;",
""
]
},
{
"added": [
" // This check was added because of DERBY-1434 ",
" if (appRequester.greaterThanOrEqualTo(10,3,0) ) {"
],
"header": "@@ -5511,11 +5507,9 @@ class DRDAConnThread extends Thread {",
"removed": [
" // This check was added because of DERBY-1434",
" ",
" if ( appRequester.getClientType() != AppRequester.DNC_CLIENT",
" || appRequester.greaterThanOrEqualTo(10,3,0) ) {"
]
}
]
}
] |
derby-DERBY-5567-398a0430
|
DERBY-5567 AlterTableTest#testDropColumn fails: drop view cannot be performed due to dependency
When a view (b) is defined on another view (a), dropping a column in
the base table can lead to both views being invalidated. This patch
(DERBY-5567-1) fixes a problem in the logic:
Depending on the order in which dependencies of the base table column
are registered, the invalidation will happen on either view a or view b.
If it happens on view a first, this view in turn will try to
invalidate view b (since that depends on view a), but with the
DROP_VIEW action which fails. The patch changes this recursive
invalidation to use the original action, e.g. DROP_COLUMN, which will
allow dropping the dependent view (dropping a column is allowed to
cause cascading drops of dependent views).
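An abstract sketch of the fix (hypothetical types, not Derby's DependencyManager or ViewDescriptor): the cascading invalidation of dependent views reuses the original action instead of switching to DROP_VIEW.

import java.util.HashMap;
import java.util.Map;

public class CascadingInvalidationSketch {
    enum Action { DROP_VIEW, DROP_COLUMN }

    static void invalidateView(String view, Action action, Map<String, String> dependents) {
        System.out.println("dropping view " + view + " due to " + action);
        String dependent = dependents.get(view);
        if (dependent != null) {
            // Before the fix this recursive call used DROP_VIEW and failed;
            // now the original action (e.g. DROP_COLUMN) is passed along.
            invalidateView(dependent, action, dependents);
        }
    }

    public static void main(String[] args) {
        Map<String, String> dependsOn = new HashMap<>();
        dependsOn.put("a", "b");                  // view b is defined on view a
        invalidateView("a", Action.DROP_COLUMN, dependsOn);
    }
}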
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1239898 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/dictionary/ViewDescriptor.java",
"hunks": [
{
"added": [
"import org.apache.derby.impl.sql.depend.BasicDependencyManager;"
],
"header": "@@ -35,6 +35,7 @@ import org.apache.derby.iapi.services.sanity.SanityManager;",
"removed": []
},
{
"added": [
" case DependencyManager.DROP_COLUMN:",
" ",
" TableDescriptor td = ",
" getDataDictionary().getTableDescriptor(uuid);",
" ",
" if (td == null) { ",
" // DERBY-5567 already dropped via another dependency ",
" break;",
" }",
" ",
" // DERBY-5567 keep original action",
" drop(lcc, td.getSchemaDescriptor(), td, action);",
"",
" lcc.getLastActivation().addWarning(",
" StandardException.newWarning(",
" SQLState.LANG_VIEW_DROPPED,",
" this.getObjectName() ));",
" break;",
"",
" default:"
],
"header": "@@ -359,20 +360,28 @@ public final class ViewDescriptor extends TupleDescriptor",
"removed": [
"\t\t case DependencyManager.DROP_COLUMN:",
"\t\t\t\tdrop(lcc, ",
"\t\t\t\t\t\tgetDataDictionary().getTableDescriptor(uuid).getSchemaDescriptor(),",
"\t\t\t\t\t\tgetDataDictionary().getTableDescriptor(uuid));",
"",
" lcc.getLastActivation().addWarning(",
" StandardException.newWarning(",
" SQLState.LANG_VIEW_DROPPED,",
" this.getObjectName() ));",
" return;",
"",
"\t\t default:"
]
},
{
"added": [
" /**",
" * Drop this descriptor, if not already done.",
" * ",
" * @param lcc current language connection context",
" * @param sd schema descriptor",
" * @param td table descriptor for this view",
" * @throws StandardException standard error policy",
" */",
" public void drop(",
" LanguageConnectionContext lcc,",
" SchemaDescriptor sd,",
" TableDescriptor td) throws StandardException",
" {",
" drop(lcc, sd, td, DependencyManager.DROP_VIEW);",
" }",
"",
" /**",
" * Drop this descriptor, if not already done, due to action.",
" * If action is not {@code DependencyManager.DROP_VIEW}, the descriptor is ",
" * dropped due to dropping some other object, e.g. a table column.",
" * ",
" * @param lcc current language connection context",
" * @param sd schema descriptor",
" * @param td table descriptor for this view",
" * @param action action",
" * @throws StandardException standard error policy",
" */",
" private void drop(",
" LanguageConnectionContext lcc,",
" SchemaDescriptor sd,",
" TableDescriptor td,",
" int action) throws StandardException",
" {",
""
],
"header": "@@ -409,14 +418,43 @@ public final class ViewDescriptor extends TupleDescriptor",
"removed": [
"\tpublic void drop(LanguageConnectionContext lcc,",
"\t\t\t\t\t\t\t SchemaDescriptor sd, TableDescriptor td)",
"\t\tthrows StandardException",
"\t{",
" "
]
},
{
"added": [
" dm.invalidateFor(td, action, lcc);"
],
"header": "@@ -425,7 +463,7 @@ public final class ViewDescriptor extends TupleDescriptor",
"removed": [
"\t\tdm.invalidateFor(td, DependencyManager.DROP_VIEW, lcc);"
]
},
{
"added": [
" public String getName() {",
" return viewName;",
" }"
],
"header": "@@ -440,5 +478,8 @@ public final class ViewDescriptor extends TupleDescriptor",
"removed": []
}
]
}
] |
derby-DERBY-5568-3e52818d
|
DERBY-5568: AssertionFailedError: Should not hold locks after commit in ResultSetMiscTest
Wait for post-commit work to complete before checking lock table.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1229066 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-557-76ddb614
|
DERBY-557 Added a test case for DERBY-557. Submitted by Knut Anders Hatlen
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@377998 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-557-90f16141
|
DERBY-557
Client driver gets OutOfMemoryError when re-executing statement without closing ResultSet
Attached a patch which fixes the bug. The patch ensures that the ResultSets associated with a Statement/PreparedStatement are removed from CommitAndRollbackListeners_ in org.apache.derby.client.am.Connection when the statement is re-executed.
I have run derbyall with only one error (wrong length of encryption key, not related to the patch). I have also run the program in the problem description (both with Statement and PreparedStatement), and the memory usage doesn't increase over time.
Contributed by Knut Anders Hatlen
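A simplified standalone sketch of the leak and the fix (hypothetical classes, not the client driver): when a statement is re-executed, its old result set is removed from the connection's commit/rollback listener list; otherwise the list grows on every execution.

import java.util.ArrayList;
import java.util.List;

public class ListenerCleanupSketch {
    static class FakeResultSet { }

    final List<FakeResultSet> commitAndRollbackListeners = new ArrayList<>();
    FakeResultSet currentResultSet;

    void execute() {
        if (currentResultSet != null) {
            // The fix: deregister the old result set before replacing it.
            commitAndRollbackListeners.remove(currentResultSet);
        }
        currentResultSet = new FakeResultSet();
        commitAndRollbackListeners.add(currentResultSet);
    }

    public static void main(String[] args) {
        ListenerCleanupSketch stmt = new ListenerCleanupSketch();
        for (int i = 0; i < 10000; i++) {
            stmt.execute();                       // re-execute without closing
        }
        System.out.println(stmt.commitAndRollbackListeners.size()); // 1, not 10000
    }
}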
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@289539 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/ResultSet.java",
"hunks": [
{
"added": [
" markClosed(true);"
],
"header": "@@ -388,8 +388,7 @@ public abstract class ResultSet implements java.sql.ResultSet,",
"removed": [
" markClosed();",
" connection_.CommitAndRollbackListeners_.remove(this);"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/am/Statement.java",
"hunks": [
{
"added": [
" /**",
" * Mark all ResultSets associated with this statement as",
" * closed. The ResultSets will not be removed from the commit and",
" * rollback listeners list in",
" * <code>org.apache.derby.client.am.Connection</code>.",
" */",
" markResultSetsClosed(false);",
" }",
"",
" /**",
" * Mark all ResultSets associated with this statement as",
" * closed.",
" *",
" * @param removeListener if true the ResultSets will be removed",
" * from the commit and rollback listeners list in",
" * <code>org.apache.derby.client.am.Connection</code>.",
" */",
" void markResultSetsClosed(boolean removeListener) {",
" resultSetList_[i].markClosed(removeListener);",
" generatedKeysResultSet_.markClosed(removeListener);",
" resultSet_.markClosed(removeListener);"
],
"header": "@@ -1393,20 +1393,38 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface",
"removed": [
" resultSetList_[i].markClosed();",
" generatedKeysResultSet_.markClosed();",
" resultSet_.markClosed();"
]
},
{
"added": [
" markResultSetsClosed(true); // true means remove from list of commit and rollback listeners"
],
"header": "@@ -1544,7 +1562,7 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface",
"removed": [
" markResultSetsClosed();"
]
}
]
}
] |
derby-DERBY-557-cff2860a
|
DERBY-210 - Network Server will leak prepared statements if not explicitly closed by the user until the connection is closed
I am uploading a combined patch 'derby-210.diff' which solves the memory leak. As Bryan suggested, I am uploading this patch and will open jira issues for other optimizations. Patch does the following:
* Eliminates the below references to PreparedStatement objects by using WeakHashMap instead of LinkedList. When there are no other references to the keys in a WeakHashMap, they will get removed from the map and can thus get garbage-collected. They do not have to wait until the Connection object is collected (see the sketch after this list).
- 'openStatements_' in org.apache.derby.client.am.Connection
- 'CommitAndRollbackListeners_' in org.apache.derby.client.am.Connection
* Removes the list 'RollbackOnlyListeners_' since this is not being used.
* Updates the following comment for openStatements_:
// Since DERBY prepared statements must be re-prepared after a commit,
// then we must traverse this list after a commit and notify statements
// that they are now in an un-prepared state.
final java.util.LinkedList openStatements_ = new java.util.LinkedList();
In the code, I did not see this list being traversed after a commit to re-prepare statements. Also, I think this is not needed since Derby does not require re-prepare of statements after a commit. Currently, this list is used to close all open statements when the originating connection is closed.
* Removes all ResultSets from HashTable 'positionedUpdateCursorNameToResultSet_' in SectionManager. Only result sets of positioned update statements were being removed from this hashtable whereas all result sets were added. Because of this, the client driver was holding on to result sets and statements even after rs.close() was called.
* Adds a test 'derbyStress.java' to jdbcapi suite. This test is based on the repro for this patch. Without this patch, it fails when run with client driver. Kathey had suggested in another mail that tests for client memory leak problems (DERBY-557, DERBY-210) can be added to same test. I did not see an existing test. So I created this new test. If DERBY-557 does not have a test, I think it can be added to this new test.
* Excludes the new test from running with jcc because jcc gives out of memory error.
* Creates 'derbyStress_app.properties' with following property 'jvmflags=-Xmx64M' to guarantee the test fails on all machines.
Successfully ran derbyall with Sun JDK 1.4.2 on Windows XP. Please take a look at this patch.
Contributed by Deepa Remesh
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@369612 13f79535-47bb-0310-9956-ffa450edef68
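As a rough illustration of the pattern described above (not the Derby code itself; the class and method names below are invented), open statements can be tracked in a WeakHashMap used as a weak set, so that a statement the application no longer references can be garbage-collected without waiting for Connection.close():

import java.util.Iterator;
import java.util.Set;
import java.util.WeakHashMap;

// Hypothetical tracker illustrating the WeakHashMap-as-a-weak-set pattern.
class OpenStatementTracker {
    // Keys are held weakly, so a statement the application has dropped
    // can be collected without waiting for the connection to be closed.
    private final WeakHashMap<AutoCloseable, Object> open =
            new WeakHashMap<AutoCloseable, Object>();

    synchronized void register(AutoCloseable stmt) {
        open.put(stmt, null); // the value is unused; the map acts as a set
    }

    // Called when the owning connection is closed: close whatever remains.
    synchronized void closeAll() throws Exception {
        Set<AutoCloseable> keys = open.keySet();
        for (Iterator<AutoCloseable> i = keys.iterator(); i.hasNext();) {
            i.next().close();
        }
        open.clear();
    }
}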
|
[
{
"file": "java/client/org/apache/derby/client/am/Connection.java",
"hunks": [
{
"added": [
" ",
" // WeakHashMap is used to store references so that the objects added to ",
" // the map can get garbage-collected without waiting for the Connection object.",
" ",
" // When Connection.close() is called, this list is traversed and markClosed() ",
" // is called on all statements in this list. ",
" final java.util.WeakHashMap openStatements_ = new java.util.WeakHashMap();",
" // Some statuses of DERBY objects may be invalid on server after both ",
" // commit and rollback. For example,",
" final java.util.WeakHashMap CommitAndRollbackListeners_ = new java.util.WeakHashMap();"
],
"header": "@@ -30,20 +30,21 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" // Since DERBY prepared statements must be re-prepared after a commit,",
" // then we must traverse this list after a commit and notify statements",
" // that they are now in an un-prepared state.",
" final java.util.LinkedList openStatements_ = new java.util.LinkedList();",
" // Some statuses of DERBY objects may be invalid on server either after only rollback",
" // or after both commit and rollback. For example,",
" // If they only depend on rollback, they need to get on RollbackOnlyListeners_.",
" final java.util.LinkedList RollbackOnlyListeners_ = new java.util.LinkedList();",
" final java.util.LinkedList CommitAndRollbackListeners_ = new java.util.LinkedList();"
]
},
{
"added": [
" openStatements_.put(ps,null);"
],
"header": "@@ -394,7 +395,7 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" openStatements_.add(ps);"
]
},
{
"added": [],
"header": "@@ -737,7 +738,6 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" RollbackOnlyListeners_.clear();"
]
},
{
"added": [
" \tjava.util.Set keySet = openStatements_.keySet();",
" for (java.util.Iterator i = keySet.iterator(); i.hasNext();) {"
],
"header": "@@ -748,7 +748,8 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" for (java.util.ListIterator i = openStatements_.listIterator(); i.hasNext();) {"
]
},
{
"added": [
" \tjava.util.Set keySet = openStatements_.keySet();",
" for (java.util.Iterator i = keySet.iterator(); i.hasNext();) {",
" \tjava.util.Set keySet = openStatements_.keySet();",
" for (java.util.Iterator i = keySet.iterator(); i.hasNext();) {"
],
"header": "@@ -756,13 +757,15 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" for (java.util.ListIterator i = openStatements_.listIterator(); i.hasNext();) {",
" for (java.util.ListIterator i = openStatements_.listIterator(); i.hasNext();) {"
]
},
{
"added": [
" openStatements_.put(s,null);"
],
"header": "@@ -1220,7 +1223,7 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" openStatements_.add(s);"
]
},
{
"added": [
" openStatements_.put(ps,null);"
],
"header": "@@ -1266,7 +1269,7 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" openStatements_.add(ps);"
]
},
{
"added": [
" openStatements_.put(cs,null);"
],
"header": "@@ -1304,7 +1307,7 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" openStatements_.add(cs);"
]
},
{
"added": [
" \tjava.util.Set keySet = CommitAndRollbackListeners_.keySet();",
" for (java.util.Iterator i = keySet.iterator(); i.hasNext();) {"
],
"header": "@@ -1444,7 +1447,8 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" for (java.util.Iterator i = CommitAndRollbackListeners_.iterator(); i.hasNext();) {"
]
},
{
"added": [
" \tjava.util.Set keySet = CommitAndRollbackListeners_.keySet();",
" for (java.util.Iterator i = keySet.iterator(); i.hasNext();) {"
],
"header": "@@ -1459,11 +1463,8 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" for (java.util.Iterator i = CommitAndRollbackListeners_.iterator(); i.hasNext();) {",
" UnitOfWorkListener listener = (UnitOfWorkListener) i.next();",
" listener.completeLocalRollback(i);",
" }",
" for (java.util.Iterator i = RollbackOnlyListeners_.iterator(); i.hasNext();) {"
]
}
]
}
] |
derby-DERBY-5574-9e26acc4
|
DERBY-5574 encryption test in encryption nightly suite test fails with ERROR XBM0S: Unable to rename file error
Catch errors on rename, and retry in the hope that the error is caused by some
temporary file system resource issue. This is the same methodology that other
parts of the system use for read and write errors. In the worst case the system
retries 5 times and still throws the error; in the best case a subsequent retry
succeeds and the user application is never aware an error was encountered.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1236370 13f79535-47bb-0310-9956-ffa450edef68
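A minimal sketch of the retry loop described here, assuming the same five-attempt limit and growing sleep as the commit (the class name is invented; this is not the actual DirFile code):

import java.io.File;

class RenameWithRetry {
    // Retry a failed rename a few times, sleeping a little longer before
    // each attempt, in case the failure is a transient resource problem.
    static boolean renameTo(File from, File to) {
        boolean renamed = from.renameTo(to);
        int attempt = 1;
        while (!renamed && attempt <= 5) {
            try {
                Thread.sleep(1000L * attempt);
            } catch (InterruptedException ie) {
                // Note the interrupt instead of letting it abort the loop.
                Thread.currentThread().interrupt();
            }
            renamed = from.renameTo(to);
            attempt++;
        }
        return renamed;
    }
}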
|
[
{
"file": "java/engine/org/apache/derby/impl/io/DirFile.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.util.InterruptStatus;"
],
"header": "@@ -39,6 +39,7 @@ import java.net.URL;",
"removed": []
},
{
"added": [
" * Rename the file denoted by this name. ",
" * Note that StorageFile objects are immutable. This method renames the ",
" * underlying file, it does not change this StorageFile object. The ",
" * StorageFile object denotes the same name as before, however the exists()",
" * method will return false after the renameTo method executes successfully.",
" *",
" * <p>",
" * It is not specified whether this method will succeed if a file ",
" * already exists under the new name."
],
"header": "@@ -230,12 +231,16 @@ class DirFile extends File implements StorageFile",
"removed": [
" * Rename the file denoted by this name. Note that StorageFile objects are immutable. This method",
" * renames the underlying file, it does not change this StorageFile object. The StorageFile object denotes the",
" * same name as before, however the exists() method will return false after the renameTo method",
" * executes successfully.",
" *<p>It is not specified whether this method will succeed if a file already exists under the new name."
]
},
{
"added": [
" boolean rename_status = super.renameTo( (File) newName);",
" int retry_count = 1;",
"",
" while (!rename_status && (retry_count <= 5))",
" {",
" // retry operation, hoping a temporary I/O resource issue is ",
" // causing the failure.",
"",
" try",
" {",
" Thread.sleep(1000 * retry_count);",
" }",
" catch (InterruptedException ie)",
" {",
" // This thread received an interrupt as well, make a note.",
" InterruptStatus.setInterrupted();",
" }",
"",
" rename_status = super.renameTo((File) newName);",
"",
" retry_count++;",
" }",
"",
" return(rename_status);"
],
"header": "@@ -243,7 +248,30 @@ class DirFile extends File implements StorageFile",
"removed": [
" return super.renameTo( (File) newName);"
]
}
]
}
] |
derby-DERBY-5578-4b4365a1
|
DERBY-5578 Provide a way to invalidate stored prepared statements
Close the resultset in the helper methods as suggested by Knut.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1348520 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5578-f448bbbd
|
DERBY-5578 Provide a way to invalidate stored prepared statements
Committing changes that add a new procedure (SYSCS_UTIL.SYSCS_INVALIDATE_STORED_STATEMENTS) which will allow users to invalidate all the stored statements inside SYS.SYSSTATEMENTS. At this point, there are only two types of stored statements in the SYS.SYSSTATEMENTS system table - statements for metadata calls and statements for trigger action plans. I have also added test cases, including regression tests and upgrade tests. The upgrade tests show that this procedure is available only after a hard upgrade. The regression tests show how the procedure can be executed only by the dba unless the dba grants execute permission to other users. Additionally, there are test cases showing statements getting invalidated by the procedure call, and subsequent execution of a metadata call or trigger causing their corresponding statements to become valid.
Since these changes fiddle around with system tables, this jira can't be backported to released branches.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1348275 13f79535-47bb-0310-9956-ffa450edef68
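For illustration only, invoking the new procedure from JDBC might look like the sketch below; the database URL and credentials are assumptions, while the procedure name is the one introduced by this change:

import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;

public class InvalidateStoredStatements {
    public static void main(String[] args) throws Exception {
        // Assumed URL and credentials; the dba (or a user who has been
        // granted execute permission) invalidates the stored statements.
        try (Connection c = DriverManager.getConnection(
                 "jdbc:derby:sampleDB", "dba", "dbaPassword");
             CallableStatement cs = c.prepareCall(
                 "CALL SYSCS_UTIL.SYSCS_INVALIDATE_STORED_STATEMENTS()")) {
            cs.execute();
        }
    }
}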
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java",
"hunks": [
{
"added": [
"\t\tinvalidateAllSPSPlans(lcc);",
"\t}",
"",
"\t/**",
"\t * @see DataDictionary#invalidateAllSPSPlans",
"\t * @exception StandardException\t\tThrown on error",
"\t */",
"\tpublic void invalidateAllSPSPlans(LanguageConnectionContext lcc) throws StandardException",
"\t{"
],
"header": "@@ -4573,6 +4573,15 @@ public final class\tDataDictionaryImpl",
"removed": []
}
]
}
] |
derby-DERBY-5580-35e7aba5
|
DERBY-5580: NativeAuthenticationServiceTest fails to delete databases
Make sure the databases get shut down before attempting to delete them.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1235709 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5582-1690ef63
|
DERBY-5582 Access denied (java.lang.RuntimePermission modifyThreadGroup) in IndexStatisticsDaemonImpl.schedule()
Put index-stat thread in derby daemon group.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1236887 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/daemon/IndexStatisticsDaemonImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.monitor.Monitor;"
],
"header": "@@ -39,6 +39,7 @@ import org.apache.derby.iapi.services.property.PropertyUtil;",
"removed": []
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java",
"hunks": [
{
"added": [
"\tprivate SecurityManager decoratorSecurityManager = null;",
"\t",
" public SecurityManagerSetup(Test test, String policyResource)",
" {",
" super(test);",
" this.decoratorPolicyResource = policyResource != null ?",
" policyResource : getDefaultPolicy();",
" }",
"",
"\t/**",
"\t * Use custom policy and SecurityManager",
"\t * ",
"\t * @param test - Test to wrap",
"\t * @param policyResource - policy resource. If null use default testing policy",
"\t * @param securityManager - Custom SecurityManager if null use the system security manager",
"\t */",
"\tpublic SecurityManagerSetup(Test test, String policyResource, SecurityManager securityManager)",
"\t\tthis.decoratorPolicyResource = policyResource != null ?",
"\t\t policyResource : getDefaultPolicy();",
"\t\tthis.decoratorSecurityManager = securityManager;"
],
"header": "@@ -70,10 +70,28 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\tpublic SecurityManagerSetup(Test test, String policyResource)",
"\t\tthis.decoratorPolicyResource = policyResource;"
]
},
{
"added": [
"\t\tinstallSecurityManager(decoratorPolicyResource, decoratorSecurityManager);"
],
"header": "@@ -113,7 +131,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\tinstallSecurityManager(decoratorPolicyResource);"
]
},
{
"added": [
"\tprivate static void installSecurityManager(String policyFile) {",
"\t installSecurityManager(policyFile, System.getSecurityManager());",
"\t}",
"",
"\tprivate static void installSecurityManager(String policyFile, final SecurityManager sm)",
"\t\t\t {",
"\t ",
"\t\tSecurityManager currentsm = System.getSecurityManager();",
"\t\tif (currentsm != null) {",
"\t\t\tSecurityManager oldSecMan = System.getSecurityManager();",
"\t\t\tif ( newPolicyProperty.equals( oldPolicyProperty ) &&",
"\t\t\t oldSecMan == sm) { return; }"
],
"header": "@@ -144,28 +162,34 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t",
"\tprivate static void installSecurityManager(String policyFile)",
"\t\t\t {",
"\t\tSecurityManager sm = System.getSecurityManager();",
"\t\tif (sm != null) {",
"\t\t\tif ( newPolicyProperty.equals( oldPolicyProperty ) ) { return; }"
]
},
{
"added": [
" if (sm == null)",
" System.setSecurityManager(new SecurityManager());",
" else",
" System.setSecurityManager(sm);"
],
"header": "@@ -186,8 +210,10 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" SecurityManager sm = new SecurityManager();",
" System.setSecurityManager(sm);"
]
}
]
}
] |
derby-DERBY-5584-e3320c9c
|
DERBY-5584: distinct grouped aggregates can return wrong results
This change addresses a problem that can arise when a GroupedAggregateResultSet
that contains a distinct aggregate is processed multiple times in the
same query execution.
The problem involves this section of GroupedAggregateResultSet:
/*
** If there was a distinct aggregate, then that column
** was automatically included as the last column in
** the sort ordering. But we don't want it to be part
** of the ordering anymore, because we aren't grouping
** by that column, we just sorted it so that distinct
** aggregation would see the values in order.
*/
The solution that was implemented in GroupedAggregateResultSet prior to this
change was assuming that the result set was only opened and read once; it
physically removed the last column from the ordering columns as a side effect
of processing the result set.
However, during a query plan such as this cartesian product, the GROUP BY
subquery is created, then opened/read/closed, opened/read/closed, etc.,
once per row of the other side of the cartesian product.
In that case, we can't physically remove the last column each time, because
then the second and subsequent times that we read the result set, we are
sorting on the wrong columns and we produce the wrong results.
The solution is to have a better way of handling that extra invisible column,
so that we can consider it sometimes, and ignore it other times,
without doing something as destructive as physically removing it from the
ordering array, which is what we do now.
Note that Derby has had a limitation that there be at most one DISTINCT
aggregate in a query for a long time, probably ever since it was written.
See, for example, this link from the 10.2 docs:
http://db.apache.org/derby/docs/10.2/ref/rrefsqlj32693.html
Only one DISTINCT aggregate expression per SelectExpression is allowed.
For example, the following query is not valid:
SELECT AVG (DISTINCT flying_time), SUM (DISTINCT miles) FROM Flights
The GroupedAggregateResultSet implementation is aware of this limit, and knows
that there is at most one distinct aggregate in the result set. This change
neither increases that dependency nor lessens it; I'm just noting it here
for the record.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1292134 13f79535-47bb-0310-9956-ffa450edef68
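Purely as an illustration of the query shape discussed above (all table and column names are invented), a grouped DISTINCT aggregate placed in a derived table that is joined with no join predicate could look like this:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DistinctGroupedAggregateExample {
    public static void main(String[] args) throws Exception {
        // Invented schema: the grouped DISTINCT aggregate sits in a derived
        // table on one side of a cartesian product, so its result set may be
        // opened, read and closed once per row of the other table.
        try (Connection c = DriverManager.getConnection("jdbc:derby:sampleDB");
             Statement s = c.createStatement();
             ResultSet rs = s.executeQuery(
                 "SELECT t.id, g.cnt"
                 + " FROM t, (SELECT dept, COUNT(DISTINCT emp) AS cnt"
                 + "          FROM staff GROUP BY dept) AS g")) {
            while (rs.next()) {
                System.out.println(rs.getInt(1) + " " + rs.getInt(2));
            }
        }
    }
}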
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/GroupedAggregateResultSet.java",
"hunks": [
{
"added": [
"\tprivate\tint numDistinctAggs = 0;"
],
"header": "@@ -88,6 +88,7 @@ class GroupedAggregateResultSet extends GenericAggregateResultSet",
"removed": []
},
{
"added": [
"\t\t\t\tresultRows = new ExecIndexRow[numGCols()+1];"
],
"header": "@@ -237,7 +238,7 @@ class GroupedAggregateResultSet extends GenericAggregateResultSet",
"removed": [
"\t\t\t\tresultRows = new ExecIndexRow[order.length+1];"
]
},
{
"added": [
"\t/**",
"\t * Return the number of grouping columns.",
"\t *",
"\t * Since some additional sort columns may have been included",
"\t * in the sort for DISTINCT aggregates, this function is",
"\t * used to ignore those columns when computing the grouped",
"\t * results.",
"\t */",
"\tprivate int numGCols() { return order.length - numDistinctAggs; }"
],
"header": "@@ -324,33 +325,26 @@ class GroupedAggregateResultSet extends GenericAggregateResultSet",
"removed": [
"\t\t\tint numDistinctAggs = 0;",
"\t\t\tfor (int i = 0; i < aggregates.length; i++)",
"\t\t\t{",
"\t\t\t\tAggregatorInfo aInfo = (AggregatorInfo)",
"\t\t\t\t\taggInfoList.elementAt(i);",
"\t\t\t\tif (aInfo.isDistinct())",
"\t\t\t\t\tnumDistinctAggs++;",
"\t\t\t}",
"\t\t\tif (order.length > numDistinctAggs)",
"\t\t\t{",
"\t\t\t\tColumnOrdering[] newOrder = new ColumnOrdering[",
"\t\t\t\t\torder.length - numDistinctAggs];",
"\t\t\t\tSystem.arraycopy(order, 0, newOrder, 0,",
"\t\t\t\t\torder.length-numDistinctAggs);",
"\t\t\t\torder = newOrder;",
"\t\t\t}"
]
},
{
"added": [
"\t\t\t\t distinguisherCol == numGCols());"
],
"header": "@@ -409,7 +403,7 @@ class GroupedAggregateResultSet extends GenericAggregateResultSet",
"removed": [
"\t\t\t\t distinguisherCol == order.length);"
]
},
{
"added": [
"\t\tfor (int index = 0; index < numGCols(); index++)"
],
"header": "@@ -486,7 +480,7 @@ class GroupedAggregateResultSet extends GenericAggregateResultSet",
"removed": [
"\t\tfor (int index = 0; index < order.length; index++)"
]
},
{
"added": [
"\t\treturn numGCols();"
],
"header": "@@ -495,7 +489,7 @@ class GroupedAggregateResultSet extends GenericAggregateResultSet",
"removed": [
"\t\treturn order.length;"
]
}
]
}
] |
derby-DERBY-5585-d07d421b
|
DERBY-5585: Improve error message when user function can't find class.
This patch was contributed by Danoja Dias (danojadias at gmail dot com)
Additional text is added to the 42x50 and 42x51 error messages suggesting
that an additional possible cause of the error might be derby.database.classpath.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1754348 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5603-78b8a9c8
|
DERBY-5603: EmbedConnection.clearLOBMapping() incorrectly clears lobFiles causing a ConcurrentModificationException
Rewrote map iteration code.
Fixed incorrect comment.
Made getlobHMObj private.
Patch file: derby-5603-1a-avoid_concurrentmodification.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1294512 13f79535-47bb-0310-9956-ffa450edef68
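A condensed sketch of the safe iteration pattern the fix adopts (compare the 'added' hunk below): close every tracked file, remember only the first failure, clear the map once after the loop, and rethrow at the end, so the map is never modified while it is still being iterated. The class and method names here are invented:

import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;

class LobFileCleanup {
    // Close all tracked files; clearing happens once, after iteration,
    // so no ConcurrentModificationException can be triggered.
    static void closeAll(Map<Object, Closeable> lobFiles) throws IOException {
        IOException first = null;
        for (Iterator<Closeable> it = lobFiles.values().iterator();
                it.hasNext();) {
            try {
                it.next().close();
            } catch (IOException ioe) {
                if (first == null) {
                    first = ioe; // keep the first failure, discard the rest
                }
            }
        }
        lobFiles.clear();
        if (first != null) {
            throw first;
        }
    }
}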
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java",
"hunks": [
{
"added": [],
"header": "@@ -3262,8 +3262,6 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
"\t\t//initialize the locator value to 0 and",
"\t\t//the hash table object to null."
]
},
{
"added": [
" // Try a bit harder to close all open files, as open file handles",
" // can cause problems further down the road.",
" SQLException firstException = null;",
" while (it.hasNext()) {",
" try {",
" ((LOBFile) it.next()).close();",
" } catch (IOException ioe) {",
" // Discard all exceptions besides the first one.",
" if (firstException == null) {",
" firstException = Util.javaException(ioe);",
" }",
" }",
" }",
" lobFiles.clear();",
" if (firstException != null) {",
" throw firstException;",
" }"
],
"header": "@@ -3277,18 +3275,25 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
"\t\t\t\twhile (it.hasNext()) {",
"\t\t\t\t\ttry {",
"\t\t\t\t\t\t((LOBFile) it.next()).close();",
"\t\t\t\t\t} catch (IOException ioe) {",
"\t\t\t\t\t\tthrow Util.javaException(ioe);",
"\t\t\t\t\t}",
"\t\t\t\t\tfinally {",
"\t\t\t\t\t\tlobFiles.clear();",
"\t\t\t\t\t}",
"\t\t\t\t}"
]
},
{
"added": [
"\tprivate HashMap getlobHMObj() {"
],
"header": "@@ -3337,7 +3342,7 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
"\tpublic HashMap getlobHMObj() {"
]
}
]
}
] |
derby-DERBY-5605-45a4a1da
|
DERBY-5605 Calling Blob/Clob free() explicitly after implicit free throws exception in client driver.
Changed the stored procedures to be no-ops if the locator is already freed.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1709431 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/LOBStoredProcedure.java",
"hunks": [
{
"added": [
" // DERBY-5605. Do not throw exception if already freed.",
" return;"
],
"header": "@@ -56,7 +56,8 @@ public class LOBStoredProcedure {",
"removed": [
" throw newSQLException(SQLState.LOB_LOCATOR_INVALID);"
]
}
]
}
] |
derby-DERBY-5607-d94fffbd
|
DERBY-5607: Use InternalDriver rather than a JDBC DataSource in order to get a connection to the credentials db when creating a database with NATIVE authentication enabled.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1242105 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/authentication/NativeAuthenticationServiceImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.jdbc.InternalDriver;"
],
"header": "@@ -49,6 +49,7 @@ import org.apache.derby.iapi.store.access.TransactionController;",
"removed": []
},
{
"added": [
" Properties properties = new Properties();",
" properties.setProperty( Attribute.USERNAME_ATTR, userName );",
" properties.setProperty( Attribute.PASSWORD_ATTR, userPassword );",
" String connectionURL = Attribute.PROTOCOL + _credentialsDB;",
" Connection conn = InternalDriver.activeDriver().connect( connectionURL, properties );",
" "
],
"header": "@@ -403,27 +404,20 @@ public final class NativeAuthenticationServiceImpl",
"removed": [
" String dataSourceName = JVMInfo.J2ME ?",
" \"org.apache.derby.jdbc.EmbeddedSimpleDataSource\" :",
" \"org.apache.derby.jdbc.EmbeddedDataSource\";",
"",
" DataSource dataSource = (DataSource) Class.forName( dataSourceName ).newInstance();",
"",
" callDataSourceSetter( dataSource, \"setDatabaseName\", _credentialsDB );",
" callDataSourceSetter( dataSource, \"setUser\", userName );",
" callDataSourceSetter( dataSource, \"setPassword\", userPassword );",
" Connection conn = dataSource.getConnection();",
" catch (ClassNotFoundException cnfe) { throw wrap( cnfe ); }",
" catch (InstantiationException ie) { throw wrap( ie ); }",
" catch (IllegalAccessException ie) { throw wrap( ie ); }"
]
}
]
}
] |
derby-DERBY-5608-d96e3aa0
|
DERBY-5608: BaseTestCase.readProcessOutput should read getInputStream() and getErrorStream() in separate threads
Use SpawnedProcess to read stdout/stderr from the subprocess, since it already
has code to do this in separate threads.
Patch file: derby-5608-1a-use_spawnedprocess.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1242681 13f79535-47bb-0310-9956-ffa450edef68
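The underlying idea is to drain each output stream of the child process on its own thread so that neither pipe fills up and blocks the child. A rough sketch (not the SpawnedProcess code; the names are invented):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

class StreamDrainer {
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    final Thread thread;

    // Start a background thread that copies the stream into a buffer, so
    // the spawned process never blocks on a full stdout or stderr pipe.
    StreamDrainer(final InputStream in, String name) {
        thread = new Thread(name) {
            public void run() {
                byte[] chunk = new byte[4096];
                try {
                    int n;
                    while ((n = in.read(chunk)) != -1) {
                        synchronized (buffer) {
                            buffer.write(chunk, 0, n);
                        }
                    }
                } catch (IOException ioe) {
                    // Stream closed or process gone; stop draining.
                }
            }
        };
        thread.setDaemon(true);
        thread.start();
    }
}

In SpawnedProcess one such drainer would be attached to getInputStream() and another to getErrorStream() of the spawned process, and the caller joins with both threads once the process has exited.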
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/SpawnedProcess.java",
"hunks": [
{
"added": [
" private boolean suppressOutput;",
""
],
"header": "@@ -41,6 +41,8 @@ public final class SpawnedProcess {",
"removed": []
},
{
"added": [
" /**",
" * Causes output obtained from the subprocess to be suppressed when",
" * executing the {@code complete}-methods.",
" *",
" * @see #getFullServerOutput() to obtain suppressed output from stdout",
" * @see #getFullServerError() to obtain suppressed output from stderr",
" */",
" public void suppressOutputOnComplete() {",
" suppressOutput = true;",
" }",
""
],
"header": "@@ -51,6 +53,17 @@ public final class SpawnedProcess {",
"removed": []
},
{
"added": [
" public String getFullServerOutput() throws InterruptedException {"
],
"header": "@@ -70,7 +83,7 @@ public final class SpawnedProcess {",
"removed": [
" public String getFullServerOutput() throws Exception {"
]
},
{
"added": [
" /**",
" * Get the full server error output (stderr) as a string using the default",
" * encoding which is assumed is how it was originally written.",
" * <p>",
" * This method should only be called after the process has completed.",
" * That is, {@link #complete(boolean)} or {@link #complete(boolean, long)}",
" * should be called first.",
" */",
" public String getFullServerError() throws InterruptedException {",
" // First wait until we've read all the output on stderr.",
" errSaver.thread.join();",
"",
" synchronized (this) {",
" return errSaver.stream.toString();",
" }",
" }",
""
],
"header": "@@ -79,6 +92,23 @@ public final class SpawnedProcess {",
"removed": []
},
{
"added": [
" if (!suppressOutput && err.size() != 0) {"
],
"header": "@@ -191,7 +221,7 @@ public final class SpawnedProcess {",
"removed": [
" if (err.size() != 0) {"
]
},
{
"added": [
" if (!suppressOutput && (destroy || exitCode != 0) &&",
" out.size() != 0) {"
],
"header": "@@ -200,7 +230,8 @@ public final class SpawnedProcess {",
"removed": [
" if ((destroy || exitCode != 0) && out.size() != 0) {"
]
}
]
}
] |
derby-DERBY-5609-5a172c50
|
DERBY-5609: Prepare old test harness for running tests on Java 8
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1242098 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/functionTests/harness/jdk18.java",
"hunks": [
{
"added": [
"/*",
" ",
" Derby - Class org.apache.derbyTesting.functionTests.harness.jdk18",
" ",
" Licensed to the Apache Software Foundation (ASF) under one or more",
" contributor license agreements. See the NOTICE file distributed with",
" this work for additional information regarding copyright ownership.",
" The ASF licenses this file to You under the Apache License, Version 2.0",
" (the \"License\"); you may not use this file except in compliance with",
" the License. You may obtain a copy of the License at",
" ",
" http://www.apache.org/licenses/LICENSE-2.0",
" ",
" Unless required by applicable law or agreed to in writing, software",
" distributed under the License is distributed on an \"AS IS\" BASIS,",
" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
" See the License for the specific language governing permissions and",
" limitations under the License.",
" ",
" */",
"",
"package org.apache.derbyTesting.functionTests.harness;",
"",
"import java.util.Vector;",
"import java.util.StringTokenizer;",
"",
"",
"public class jdk18 extends jvm {",
" ",
" public String getName(){return \"jdk18\";}",
" public jdk18(boolean noasyncgc, boolean verbosegc, boolean noclassgc,",
" long ss, long oss, long ms, long mx, String classpath, String prof,",
" boolean verify, boolean noverify, boolean nojit, Vector D) {",
" super(noasyncgc,verbosegc,noclassgc,ss,oss,ms,mx,classpath,prof,",
" verify,noverify,nojit,D);",
" }",
" ",
" public jdk18(String classpath, Vector D) {",
" super(classpath,D);",
" }",
" ",
" public jdk18(long ms, long mx, String classpath, Vector D) {",
" super(ms,mx,classpath,D);",
" }",
" ",
" public jdk18() { }",
" ",
" ",
" public Vector getCommandLine() {",
" StringBuffer sb = new StringBuffer();",
" Vector v = super.getCommandLine();",
" appendOtherFlags(sb);",
" String s = sb.toString();",
" StringTokenizer st = new StringTokenizer(s);",
" while (st.hasMoreTokens()) {",
" v.addElement(st.nextToken());",
" }",
" return v;",
" }",
" ",
" public void appendOtherFlags(StringBuffer sb) {",
" if (noasyncgc) warn(\"jdk18 does not support noasyncgc\");",
" if (verbosegc) sb.append(\" -verbose:gc\");",
" if (noclassgc) sb.append(\" -Xnoclassgc\");",
" if (ss>=0) warn(\"jdk18 does not support ss\");",
" if (oss>=0) warn(\"jdk18 does not support oss\");",
" if (ms>=0) {",
" sb.append(\" -ms\");",
" sb.append(ms);",
" }",
" if (mx>=0) {",
" sb.append(\" -mx\");",
" sb.append(mx);",
" }",
" if (classpath!=null) {",
" sb.append(\" -classpath \");",
" sb.append(classpath);",
" }",
" if (prof!=null) warn(\"jdk18 does not support prof\");",
" if (verify) warn(\"jdk18 does not support verify\");",
" if (noverify) warn(\"jdk18 does not support noverify\");",
" if (nojit) sb.append(\" -Djava.compiler=NONE\");",
" if (D != null)",
" for (int i=0; i<D.size();i++) {",
" sb.append(\" -D\");",
" sb.append((String)(D.elementAt(i)));",
" }",
" }",
" public String getDintro() { return \"-D\"; }",
"}"
],
"header": "@@ -0,0 +1,90 @@",
"removed": []
}
]
}
] |
derby-DERBY-5610-d7582c6e
|
DERBY-5610 ServerPropertiesTest prints .java.net.SocketException: Connection reset to console but test passes
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1484507 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/NetworkServerControlImpl.java",
"hunks": [
{
"added": [
" try {",
" writeCommandHeader(COMMAND_TESTCONNECTION);",
" writeLDString(database);",
" writeLDString(user);",
" writeLDString(password);",
" send();",
" readResult();",
" } catch (IOException ioe) {",
" consolePropertyMessage(\"DRDA_NoIO.S\",",
" new String [] {hostArg, ",
" (new Integer(portNumber)).toString(), ",
" ioe.getMessage()}); ",
" }"
],
"header": "@@ -1261,13 +1261,19 @@ public final class NetworkServerControlImpl {",
"removed": [
" ",
" writeCommandHeader(COMMAND_TESTCONNECTION);",
" writeLDString(database);",
" writeLDString(user);",
" writeLDString(password);",
" send();",
" readResult();"
]
},
{
"added": [
" if (msg != null && msg.startsWith(DRDA_MSG_PREFIX))"
],
"header": "@@ -3553,7 +3559,7 @@ public final class NetworkServerControlImpl {",
"removed": [
" if (msg.startsWith(DRDA_MSG_PREFIX))"
]
}
]
}
] |
derby-DERBY-5613-5ed54d8c
|
DERBY-5613 Queries with group by column not included in the column list for JOIN(INNER or OUTER) with NATURAL or USING does not fail
The issue of the join column getting associated with the left or right table is being tracked under DERBY-5613. This commit changes the comments in the test to refer to DERBY-5613.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1243784 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5614-063dd554
|
DERBY-5614: NullPointerException with INSERT INTO [global temporary table] SELECT ... FROM [VTI]
Disables bulk-insert for GTTs when selecting from a VTI.
Added a new test case.
Added SampleVTI, which is an incomplete (only supports a few getters) VTI
intended for basic testing.
Patch file: derby-5614-1b-disable_bulkinsert_gtt.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1295085 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/functionTests/util/SampleVTI.java",
"hunks": [
{
"added": [
"/*",
"",
" Derby - Class org.apache.derbyTesting.functionTests.util.SampleVTI",
"",
" Licensed to the Apache Software Foundation (ASF) under one or more",
" contributor license agreements. See the NOTICE file distributed with",
" this work for additional information regarding copyright ownership.",
" The ASF licenses this file to You under the Apache License, Version 2.0",
" (the \"License\"); you may not use this file except in compliance with",
" the License. You may obtain a copy of the License at",
"",
" http://www.apache.org/licenses/LICENSE-2.0",
"",
" Unless required by applicable law or agreed to in writing, software",
" distributed under the License is distributed on an \"AS IS\" BASIS,",
" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
" See the License for the specific language governing permissions and",
" limitations under the License.",
"",
" */",
"package org.apache.derbyTesting.functionTests.util;",
"",
"import java.io.InputStream;",
"import java.io.Reader;",
"import java.sql.ResultSet;",
"import java.sql.ResultSetMetaData;",
"import java.sql.SQLException;",
"",
"import org.apache.derby.vti.VTITemplate;",
"",
"/**",
" * A very simple, read-only, sample VTI.",
" * <p>",
" * This VTI is incomplete and has its quirks - it is intended for basic testing",
" * only! Supported getters:",
" * <ul> <li>getString</li>",
" * <li>getInt</li>",
" * </ul>",
" */",
"public class SampleVTI",
" extends VTITemplate {",
"",
" private static final String[][] oneColData = new String[][] {",
" {\"one\"}, {\"two\"}, {\"three\"}, {\"four\"}, {\"five\"}",
" };",
"",
" /** Returns a sample VTI that is empty (has zero rows). */",
" public static ResultSet emptySampleVTI() {",
" return new SampleVTI(new String[0][0]);",
" }",
"",
" /**",
" * Returns a sample VTI with the some test data.",
" *",
" *@return A result set with a single column with string data (five rows).",
" */",
" public static ResultSet oneColSampleVTI() {",
" return new SampleVTI(oneColData);",
" }",
"",
" public static String[][] oneColSampleVTIData() {",
" return (String[][])oneColData.clone();",
" }",
"",
" private final String[][] data;",
" private final int rows;",
" private final int cols;",
" private int index = -1;",
" private boolean wasNull;",
" private boolean closed;",
"",
" private SampleVTI(String[][] data) {",
" this.data = data;",
" this.rows = data.length;",
" this.cols = rows == 0 ? 0 : data[0].length;",
" }",
"",
" private String getColumn(int columnIndex)",
" throws SQLException {",
" if (closed) {",
" throw new SQLException(\"result set closed\");",
" }",
" if (columnIndex < 1 || columnIndex > cols) {",
" throw new SQLException(\"column value out of range\");",
" }",
" String val = data[index][columnIndex -1];",
" wasNull = val == null;",
" return val;",
" }",
"",
" //@Override",
" public ResultSetMetaData getMetaData() throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" //@Override",
" public boolean next() throws SQLException {",
" if (closed) {",
" throw new SQLException(\"result set closed\");",
" }",
" return ++index < rows;",
" }",
"",
" //@Override",
" public void close() throws SQLException {",
" this.closed = true;",
" }",
"",
" //@Override",
" public String getString(int columnIndex)",
" throws SQLException {",
" return getColumn(columnIndex);",
" }",
"",
" //@Override",
" public int getInt(int columnIndex)",
" throws SQLException {",
" String raw = getColumn(columnIndex);",
" if (wasNull) {",
" raw = \"0\";",
" }",
" try {",
" return Integer.parseInt(raw);",
" } catch (NumberFormatException nfe) {",
" throw new SQLException(\"cannot get value as int\");",
" }",
" }",
"",
" //@Override",
" public boolean wasNull() {",
" return wasNull;",
" }",
"",
" public int getHoldability() throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public boolean isClosed() throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNString(int columnIndex, String nString) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNString(String columnLabel, String nString) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public String getNString(int columnIndex) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public String getNString(String columnLabel) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public Reader getNCharacterStream(int columnIndex) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public Reader getNCharacterStream(String columnLabel) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNCharacterStream(String columnLabel, Reader reader, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateAsciiStream(int columnIndex, InputStream x, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBinaryStream(int columnIndex, InputStream x, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateCharacterStream(int columnIndex, Reader x, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBinaryStream(String columnLabel, InputStream x, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateCharacterStream(String columnLabel, Reader reader, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBlob(int columnIndex, InputStream inputStream, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateClob(int columnIndex, Reader reader, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateClob(String columnLabel, Reader reader, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateCharacterStream(int columnIndex, Reader x) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateClob(int columnIndex, Reader reader) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateClob(String columnLabel, Reader reader) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNClob(int columnIndex, Reader reader) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public void updateNClob(String columnLabel, Reader reader) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public Object getObject(int columnIndex, Class type) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"",
" public Object getObject(String columnLabel, Class type) throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
"}"
],
"header": "@@ -0,0 +1,285 @@",
"removed": []
}
]
}
] |
derby-DERBY-5615-0f43aec7
|
DERBY-5615: Permission problems with classpath subsubprotocol
Change the structure of CPFile.getInputStream() back to what it was
before the original fix for this issue (revision 1582655), but with
doPrivileged() calls around all operations that require privileges.
The restructuring in the original fix apparently prevented some
resources from being freed, so that DatabaseClassLoadingTest and
NativeAuthenticationServiceTest failed on Windows platforms because
they could not delete the jar file that contained the classpath
database.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1582754 13f79535-47bb-0310-9956-ffa450edef68
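The shape of such a privileged wrapper, mirroring the getResourceAsStream() wrapper visible in the hunk below but simplified into a standalone sketch with an invented class name, is roughly:

import java.io.InputStream;
import java.security.AccessController;
import java.security.PrivilegedAction;

class PrivilegedResourceLoader {
    // Perform only the resource lookup inside doPrivileged(), so that code
    // without the corresponding permission can still have the lookup done
    // on its behalf by the engine's own protection domain.
    static InputStream getResourceAsStream(final ClassLoader cl,
                                           final String name) {
        return AccessController.doPrivileged(
            new PrivilegedAction<InputStream>() {
                public InputStream run() {
                    return cl.getResourceAsStream(name);
                }
            });
    }
}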
|
[
{
"file": "java/engine/org/apache/derby/impl/io/CPFile.java",
"hunks": [
{
"added": [],
"header": "@@ -26,12 +26,9 @@ import org.apache.derby.io.StorageFile;",
"removed": [
"import java.io.IOException;",
"import java.security.PrivilegedActionException;",
"import java.security.PrivilegedExceptionAction;"
]
},
{
"added": [
" InputStream is = null;",
" ClassLoader cl = getContextClassLoader(Thread.currentThread());",
" if (cl != null) {",
" is = getResourceAsStream(cl, path);",
" }",
" // don't assume the context class loader is tied",
" // into the class loader that loaded this class.",
" if (is == null) {",
" cl = getClass().getClassLoader();",
" // Javadoc indicates implementations can use",
" // null as a return from Class.getClassLoader()",
" // to indicate the system/bootstrap classloader.",
" if (cl != null) {",
" is = getResourceAsStream(cl, path);",
" } else {",
" is = getSystemResourceAsStream(path);",
" }",
" if (is == null) {",
" throw new FileNotFoundException(toString());",
" return is;",
""
],
"header": "@@ -95,22 +92,32 @@ class CPFile extends InputStreamFile",
"removed": [
" URL url = getURL();",
" if (url == null) {",
" throw new FileNotFoundException(toString());",
" try {",
" return openStream(url);",
" } catch (FileNotFoundException fnf) {",
" throw fnf;",
" } catch (IOException ioe) {",
" FileNotFoundException fnf = new FileNotFoundException(toString());",
" fnf.initCause(ioe);",
" throw fnf;"
]
},
{
"added": [
" /**",
" * Privileged wrapper for {@code ClassLoader.getResourceAsStream(String)}.",
" */",
" private static InputStream getResourceAsStream(",
" final ClassLoader cl, final String name) {",
" return AccessController.doPrivileged(",
" new PrivilegedAction<InputStream>() {",
" public InputStream run() {",
" return cl.getResourceAsStream(name);",
" }",
" });",
" }",
"",
" /**",
" * Privileged wrapper for",
" * {@code ClassLoader.getSystemResourceAsStream(String)}.",
" */",
" private static InputStream getSystemResourceAsStream(final String name) {",
" return AccessController.doPrivileged(",
" new PrivilegedAction<InputStream>() {",
" public InputStream run() {",
" return ClassLoader.getSystemResourceAsStream(name);",
" }",
" });"
],
"header": "@@ -169,17 +176,29 @@ class CPFile extends InputStreamFile",
"removed": [
" /** Privileged wrapper for {@code URL.openStream()}. */",
" private static InputStream openStream(final URL url) throws IOException {",
" try {",
" return AccessController.doPrivileged(",
" new PrivilegedExceptionAction<InputStream>() {",
" public InputStream run() throws IOException {",
" return url.openStream();",
" }",
" });",
" } catch (PrivilegedActionException pae) {",
" throw (IOException) pae.getCause();",
" }"
]
}
]
}
] |
derby-DERBY-5615-9f1b3146
|
DERBY-5615: Permission problems with classpath subsubprotocol
Wrap CPFile's privileged operations in doPrivileged() so that
classpath databases can be accessed with a security manager.
Make more of the test cases in DatabaseClassLoadingTest and
NativeAuthenticationServiceTest run with a security manager.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1582655 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/io/CPFile.java",
"hunks": [
{
"added": [
"import java.io.IOException;",
"import java.security.AccessController;",
"import java.security.PrivilegedAction;",
"import java.security.PrivilegedActionException;",
"import java.security.PrivilegedExceptionAction;"
],
"header": "@@ -26,7 +26,12 @@ import org.apache.derby.io.StorageFile;",
"removed": []
},
{
"added": [
" URL url = getURL();",
"",
" if (url == null) {",
" throw new FileNotFoundException(toString());",
" }",
"",
" try {",
" return openStream(url);",
" } catch (FileNotFoundException fnf) {",
" throw fnf;",
" } catch (IOException ioe) {",
" FileNotFoundException fnf = new FileNotFoundException(toString());",
" fnf.initCause(ioe);",
" throw fnf;",
" }",
""
],
"header": "@@ -90,30 +95,22 @@ class CPFile extends InputStreamFile",
"removed": [
" \t//System.out.println(\"HERE FOR \" + toString());",
" \tInputStream is = null;",
" \tClassLoader cl = Thread.currentThread().getContextClassLoader();",
" \tif (cl != null)",
" \t\tis = cl.getResourceAsStream(path);",
" \t",
" \t// don't assume the context class loader is tied",
" \t// into the class loader that loaded this class.",
" \tif (is == null)",
" \t{",
" \t\tcl = getClass().getClassLoader();",
" \t\t// Javadoc indicates implementations can use",
" \t\t// null as a return from Class.getClassLoader()",
" \t\t// to indicate the system/bootstrap classloader.",
" \t\tif (cl != null)",
" \t\t\tis = cl.getResourceAsStream(path);",
" \t\telse",
" \t\t\tis = ClassLoader.getSystemResourceAsStream(path);",
" \t}",
" \t",
" \tif (is == null)",
" \t\tthrow new FileNotFoundException(toString());",
" \treturn is;",
" \t"
]
},
{
"added": [
" ClassLoader cl = getContextClassLoader(Thread.currentThread());",
" myURL = getResource(cl, path);"
],
"header": "@@ -123,10 +120,10 @@ class CPFile extends InputStreamFile",
"removed": [
" ClassLoader cl = Thread.currentThread().getContextClassLoader();",
" myURL = cl.getResource(path);"
]
}
]
}
] |
derby-DERBY-5617-04846d11
|
DERBY-5617: Improve process handling in SpawnedProcess
Added a mechanism to kill processes if they live for too long (currently
the threshold is 45 minutes; this may be too high).
Don't let interrupts abort waiting for or terminating a process.
Clean up the process properly when it has terminated.
Patch file: derby-5617-1a-spawnedprocess_improvements.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1244444 13f79535-47bb-0310-9956-ffa450edef68
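The watchdog can be pictured as a TimerTask that destroys the process once the threshold expires; the following is a simplified sketch with invented names, not the SpawnedProcess implementation:

import java.util.Timer;
import java.util.TimerTask;

class ProcessWatchdog {
    // A single daemon timer shared by all watched processes.
    private static final Timer KILL_TIMER = new Timer(true);

    // Schedule a forced destroy of the process after the given threshold;
    // the returned task is cancelled if the process completes in time.
    static TimerTask scheduleKill(final Process p, long thresholdMillis) {
        TimerTask killer = new TimerTask() {
            public void run() {
                p.destroy(); // the process lived too long, terminate it
            }
        };
        KILL_TIMER.schedule(killer, thresholdMillis);
        return killer;
    }
}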
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/NetworkServerTestSetup.java",
"hunks": [
{
"added": [
" spawnedServer.complete(2000);"
],
"header": "@@ -204,7 +204,7 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" spawnedServer.complete(true);"
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/junit/SpawnedProcess.java",
"hunks": [
{
"added": [
"import java.io.OutputStream;",
"import java.util.Timer;",
"import java.util.TimerTask;",
"",
" * <p>",
" * There are three main aspects handled by this class:",
" * <ul> <li>Draining the output streams of the process.<br/>",
" * Happens automatically, the output gathered can be accessed with",
" * {@linkplain #getFailMessage}, {@linkplain #getFullServerError},",
" * {@linkplain #getFullServerOutput}, and",
" * {@linkplain #getNextServerOutput}</li>",
" * <li>Waiting for process completion, followed by cleanup (see",
" * {@linkplain #complete()} and {@linkplain #complete(long)})</li>",
" * <li>Forcibly destroying a process that live too long, for instance",
" * if inter-process communication hangs. This happens automatically",
" * if a threshold value is exceeded.</li>",
" * </ul>",
" * <p>",
" * <em>Implementation notes</em>: Active waiting is employed when waiting for",
" * the process to complete. This is considered acceptable since the expected",
" * usage pattern is to spawn the process, execute a set of tests, and then",
" * finally asking the process to shut down. Waiting for the process to",
" * complete is the last step, and a process typically lives only for a short",
" * period of time anyway (often only for seconds, seldom more than a few",
" * minutes).",
" * <br/>",
" * Forcibly destroying processes that live too long makes the test run",
" * continue even when facing inter-process communication hangs. The prime",
" * example is when both the client and the server are waiting for the other",
" * party to send data. Since the timeout is very high this feature is intended",
" * to avoid automated test runs from hanging indefinitely, for instance due to",
" * environmental issues affecting the process.",
"//@NotThreadSafe",
" private static final String TAG = \"DEBUG: {SpawnedProcess} \";",
" private static Timer KILL_TIMER;",
"",
" /**",
" * Property allowing the kill threshold to be overridden.",
" * <p>",
" * Interprets the numeric value as milliseconds, ignored if non-numeric.",
" * Overriding this value may be required if the test machine is extremely",
" * slow, or you want to kill hung processes earlier for some reason.",
" */",
" private static final String KILL_THRESHOLD_PROPERTY =",
" \"derby.tests.process.killThreshold\";",
" private static final long KILL_THRESHOLD_DEFAULT = 45*60*1000; // 45 minutes",
" /** The maximum allowed time for a process to live. */",
" private static final long KILL_THRESHOLD;",
" static {",
" long tmpThreshold = KILL_THRESHOLD_DEFAULT;",
" String tmp = BaseTestCase.getSystemProperty(KILL_THRESHOLD_PROPERTY);",
" if (tmp != null) {",
" try {",
" tmpThreshold = Long.parseLong(tmp);",
" } catch (NumberFormatException nfe) {",
" // Ignore, use the default set previously.",
" System.err.println(TAG + \"Invalid kill threshold: \" + tmp);",
" }",
" }",
" KILL_THRESHOLD = tmpThreshold;",
" }",
"",
" private static void sleep(long ms) {",
" try {",
" Thread.sleep(ms);",
" } catch (InterruptedException ie) {",
" // Ignore the interrupt. We want to make sure the process",
" // terminates before returning, and we don't want to preserve",
" // the interrupt flag because it causes Derby to shut down. These",
" // are test requirements and don't apply for production code.",
" // Print a notice to stderr.",
" System.out.println(TAG + \"Interrupted while sleeping (ignored)\");",
" }",
" }",
""
],
"header": "@@ -22,17 +22,88 @@ package org.apache.derbyTesting.junit;",
"removed": [
" * Handles the output streams (stderr and stdout) written",
" * by the process by spawning off background threads to read",
" * them into byte arrays. The class provides access to the",
" * output, typically called once the process is complete."
]
},
{
"added": [
" private final TimerTask killTask;",
"",
" /**",
" * Creates a new wrapper to handle the given process.",
" *",
" * @param javaProcess a (running) process",
" * @param name name to associate with the process",
" */",
" errSaver = startStreamSaver(javaProcess.getErrorStream(), name",
" outSaver = startStreamSaver(javaProcess.getInputStream(), name",
" killTask = scheduleKill(javaProcess, name);",
" }",
"",
" /**",
" * Schedules a task to kill/terminate the task after a predefined timeout.",
" *",
" * @param name name of the process",
" * @param process the process",
" * @return The task object.",
" */",
" private TimerTask scheduleKill(Process process, String name) {",
" synchronized (KILL_THRESHOLD_PROPERTY) {",
" if (KILL_TIMER == null) {",
" // Can't use 1.5 methods yet due to J2ME.",
" KILL_TIMER = new Timer();",
" } ",
" }",
" TimerTask killer = new ProcessKillerTask(process, name);",
" KILL_TIMER.schedule(killer, KILL_THRESHOLD);",
" return killer;",
" * Causes output obtained from the process to be suppressed when"
],
"header": "@@ -43,18 +114,46 @@ public final class SpawnedProcess {",
"removed": [
" errSaver = streamSaver(javaProcess.getErrorStream(), name",
" outSaver = streamSaver(javaProcess.getInputStream(), name",
" * Causes output obtained from the subprocess to be suppressed when"
]
},
{
"added": [
" * That is, {@link #complete()} or {@link #complete(long)}"
],
"header": "@@ -79,7 +178,7 @@ public final class SpawnedProcess {",
"removed": [
" * That is, {@link #complete(boolean)} or {@link #complete(boolean, long)}"
]
},
{
"added": [
" * That is, {@link #complete()} or {@link #complete(long)}"
],
"header": "@@ -97,7 +196,7 @@ public final class SpawnedProcess {",
"removed": [
" * That is, {@link #complete(boolean)} or {@link #complete(boolean, long)}"
]
},
{
"added": [
" * encoding which is assumed is how it was originally",
" public String getNextServerOutput() {"
],
"header": "@@ -115,12 +214,11 @@ public final class SpawnedProcess {",
"removed": [
" * encoding which is assumed is how it was orginally",
" public String getNextServerOutput() throws Exception",
" {"
]
},
{
"added": [
" public String getFailMessage(String reason) {",
" sleep(500);"
],
"header": "@@ -136,9 +234,8 @@ public final class SpawnedProcess {",
"removed": [
" public String getFailMessage(String reason) throws InterruptedException",
" {",
" Thread.sleep(500);"
]
},
{
"added": [
" * Waits for the process to terminate.",
" * <p>",
" * This call will block until one of the following conditions are met:",
" * <ul> <li>the process terminates on its own</li>",
" * <li>the hung-process watchdog mechanism forcibly terminates the",
" * process (see {@linkplain #scheduleKill})</li>",
" * @return The process exit code.",
" * @throws IOException if printing diagnostics fails",
" public int complete()",
" throws IOException {",
" return complete(Long.MAX_VALUE); ",
"",
" * Waits for the process to terminate, forcibly terminating it if it",
" * takes longer than the specified timeout.",
" * <p>",
" * This call will block until one of the following conditions are met:",
" * <ul> <li>the process terminates on its own</li>",
" * <li>the timeout is exceeded, at which point the process is",
" * forcibly destroyed</li>",
" * <li>the hung-process watchdog mechanism forcibly terminates the",
" * process (see {@linkplain #scheduleKill})</li>",
" * @return The process exit code.",
" * @throws IOException if printing diagnostics fails",
" public int complete(long timeout)",
" throws IOException {",
" long start = System.currentTimeMillis();",
" Integer exitCode = null;",
" while (exitCode == null) {",
" try {",
" exitCode = new Integer(javaProcess.exitValue());",
" } catch (IllegalThreadStateException itse) {",
" // This exception means the process is running.",
" if (System.currentTimeMillis() - start > timeout) {",
" javaProcess.destroy();",
" }",
" sleep(500);",
" }",
" // Clean up",
" killTask.cancel();",
" cleanupProcess();",
" joinWith(errSaver.thread);",
" joinWith(outSaver.thread);",
" printDiagnostics(exitCode.intValue());",
" return exitCode.intValue();",
" }",
" ",
" /**",
" * Cleans up the process, explicitly closing the streams associated with it.",
" */",
" private void cleanupProcess() {",
" // Doing this is considered best practice.",
" closeStream(javaProcess.getOutputStream());",
" closeStream(javaProcess.getErrorStream());",
" closeStream(javaProcess.getInputStream());",
" javaProcess.destroy();",
" }",
" /**",
" * Prints diagnostics to stdout/stderr if the process failed.",
" *",
" * @param exitCode the exit code of the spawned process",
" * @throws IOException if writing to an output stream fails",
" * @see #suppressOutput",
" */",
" private synchronized void printDiagnostics(int exitCode)",
" throws IOException {",
" // Always write the error, except when suppressed.",
" ByteArrayOutputStream err = errSaver.stream;",
" if (!suppressOutput && err.size() != 0) {",
" System.err.println(\"START-SPAWNED:\" + name + \" ERROR OUTPUT:\");",
" err.writeTo(System.err);",
" System.err.println(\"END-SPAWNED :\" + name + \" ERROR OUTPUT:\");",
" }",
" // Only write contents of stdout if it appears the server",
" // failed in some way, or output is suppressed.",
" ByteArrayOutputStream out = outSaver.stream;",
" if (!suppressOutput && exitCode != 0 && out.size() != 0) {",
" System.out.println(\"START-SPAWNED:\" + name",
" + \" STANDARD OUTPUT: exit code=\" + exitCode);",
" out.writeTo(System.out);",
" System.out.println(\"END-SPAWNED :\" + name",
" + \" STANDARD OUTPUT:\");",
" }",
" }",
" /** Joins up with the specified thread. */",
" private void joinWith(Thread t) {",
" try {",
" t.join();",
" } catch (InterruptedException ie) {",
" // Ignore the interrupt. We want to make sure the process",
" // terminates before returning, and we don't want to preserve",
" // the interrupt flag because it causes Derby to shut down. These",
" // are test requirements and don't apply for production code.",
" // Print a notice to stderr.",
" System.out.println(TAG + \"Interrupted while joining \" +",
" \"with thread '\" + t.toString() + \"'\");",
" }",
" }",
" /**",
" * Closes the specified stream, ignoring any exceptions.",
" *",
" * @param stream stream to close (may be {@code null})",
" */",
" private void closeStream(Object stream) {",
" if (stream instanceof InputStream) {",
" try {",
" ((InputStream)stream).close();",
" } catch (IOException ioe) {",
" // Ignore exception on close",
" }",
" } else if (stream instanceof OutputStream) {",
" try {",
" ((OutputStream)stream).close();",
" } catch (IOException ioe) {",
" // Ignore exception on close"
],
"header": "@@ -169,78 +266,132 @@ public final class SpawnedProcess {",
"removed": [
" * Complete the process.",
" * @param destroy true to destroy it, false to wait indefinitely to complete ",
" public int complete(boolean destroy) throws InterruptedException, IOException {",
" return complete(destroy, -1L);",
" ",
" * Complete the process.",
" * @param destroy True to destroy it, false to wait for it to complete ",
" * based on timeout.",
" * ",
" * @param timeout milliseconds to wait until finished or else destroy.",
" * -1 don't timeout",
" * ",
" public int complete(boolean destroy, long timeout) throws InterruptedException, IOException {",
" int exitCode;",
" if (timeout >= 0 ) {",
" final long start = System.currentTimeMillis();",
" boolean timedOut = true;",
" long totalwait = -1;",
" while (totalwait < timeout) {",
" try { ",
" exitCode = javaProcess.exitValue();",
" //if no exception thrown, exited normally",
" destroy = timedOut = false;",
" break;",
" }catch (IllegalThreadStateException ite) {",
" // Ignore exception, it means that the process is running.",
" Thread.sleep(1000);",
" totalwait = System.currentTimeMillis() - start;",
" }",
" }",
" // If we timed out, make sure we try to destroy the process.",
" if (timedOut) {",
" destroy = true;",
" \t}",
" if (destroy)",
" javaProcess.destroy();",
" exitCode = javaProcess.waitFor();",
" // The process has completed. Wait until we've read all output.",
" outSaver.thread.join();",
" errSaver.thread.join();",
" synchronized (this) {",
" // Always write the error",
" ByteArrayOutputStream err = errSaver.stream;",
" if (!suppressOutput && err.size() != 0) {",
" System.err.println(\"START-SPAWNED:\" + name + \" ERROR OUTPUT:\");",
" err.writeTo(System.err);",
" System.err.println(\"END-SPAWNED :\" + name + \" ERROR OUTPUT:\");",
" }",
" // Only write the error if it appeared the server",
" // failed in some way.",
" ByteArrayOutputStream out = outSaver.stream;",
" if (!suppressOutput && (destroy || exitCode != 0) &&",
" out.size() != 0) {",
" System.out.println(\"START-SPAWNED:\" + name",
" + \" STANDARD OUTPUT: exit code=\" + exitCode);",
" out.writeTo(System.out);",
" System.out.println(\"END-SPAWNED :\" + name",
" + \" STANDARD OUTPUT:\");",
" ",
" return exitCode;"
]
},
{
"added": [
" /**",
" * Creates and starts a stream saver that reads the specified input stream",
" * in a separate stream.",
" *",
" * @param in input stream to read from",
" * @param name name of the thread",
" * @return A {@code StreamSaver} object.",
" */",
" private StreamSaver startStreamSaver(final InputStream in,"
],
"header": "@@ -257,7 +408,15 @@ public final class SpawnedProcess {",
"removed": [
" private StreamSaver streamSaver(final InputStream in,"
]
},
{
"added": [
"",
" /**",
" * A task that will kill the specified process.",
" *",
" * @see #scheduleKill(java.lang.Process, java.lang.String) ",
" */",
" private static class ProcessKillerTask",
" extends TimerTask {",
"",
" private final String name;",
" private Process process;",
"",
" public ProcessKillerTask(Process process, String name) {",
" this.process = process;",
" this.name = name;",
" }",
"",
" public synchronized boolean cancel() {",
" // Since this task will usually be in the timer queue for a long",
" // time, nullify the process reference on cancel to free resources.",
" process = null;",
" return super.cancel();",
" }",
"",
" public synchronized void run() {",
" // We may have just been cancelled ",
" if (process == null) {",
" return;",
" }",
"",
" System.err.println(\"DEBUG: Destroying process '\" + name + \"'\");",
" process.destroy();",
" int retriesAllowed = 10;",
" while (retriesAllowed > 0) {",
" try {",
" int exitCode = process.exitValue();",
" System.err.println(\"DEBUG: Destroyed process '\" + name +",
" \"', exit code is \" + exitCode);",
" break;",
" } catch (IllegalThreadStateException itse) {",
" // Sleep for a second and retry.",
" sleep(1000);",
" retriesAllowed--;",
" }",
" }",
" if (retriesAllowed == 0) {",
" System.err.println(",
" \"DEBUG: Faild to destroy process '\" + name + \"'\");",
" } ",
" process = null;",
" }",
" }"
],
"header": "@@ -291,4 +450,56 @@ public final class SpawnedProcess {",
"removed": []
}
]
}
] |
derby-DERBY-5618-4a08a159
|
DERBY-2162/DERBY-5618: Close the URLClassLoader when tests are done
with it so that file handles are freed and the jar files can be
deleted.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1582220 13f79535-47bb-0310-9956-ffa450edef68
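A minimal standalone sketch of the close step this patch adds (illustrative only: the jar path and class name are hypothetical, and the instanceof check mirrors the fact that URLClassLoader only implements Closeable on Java 7 and higher):

    import java.io.Closeable;
    import java.net.URL;
    import java.net.URLClassLoader;

    // Illustrative sketch: closing the loader releases the jar file handles
    // it holds, so the jar files can be deleted afterwards.
    public class CloseLoaderSketch {
        public static void main(String[] args) throws Exception {
            URL jar = new URL("file:dummy.jar");      // hypothetical jar file
            URLClassLoader loader = new URLClassLoader(new URL[] { jar });
            try {
                // ... load classes from the jar here ...
            } finally {
                if (loader instanceof Closeable) {    // true on Java 7+
                    ((Closeable) loader).close();
                }
            }
        }
    }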
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/ClasspathSetup.java",
"hunks": [
{
"added": [
"import java.io.Closeable;",
"import java.io.IOException;",
"import java.security.PrivilegedExceptionAction;"
],
"header": "@@ -19,10 +19,13 @@",
"removed": []
},
{
"added": [
" private URLClassLoader _newClassLoader;"
],
"header": "@@ -49,6 +52,7 @@ public class ClasspathSetup extends TestSetup",
"removed": []
},
{
"added": [
" public ClasspathSetup(Test test, URL resource)"
],
"header": "@@ -61,7 +65,7 @@ public class ClasspathSetup extends TestSetup",
"removed": [
" public ClasspathSetup( Test test, URL resource ) throws Exception"
]
},
{
"added": [
" _newClassLoader = new URLClassLoader( new URL[] { _resource }, _originalClassLoader );",
" Thread.currentThread().setContextClassLoader( _newClassLoader );"
],
"header": "@@ -84,9 +88,9 @@ public class ClasspathSetup extends TestSetup",
"removed": [
" URLClassLoader newClassLoader = new URLClassLoader( new URL[] { _resource }, _originalClassLoader );",
" Thread.currentThread().setContextClassLoader( newClassLoader );"
]
},
{
"added": [
" protected void tearDown() throws Exception",
" new PrivilegedExceptionAction<Void>()",
" public Void run() throws IOException",
"",
" // On Java 7 and higher, URLClassLoader implements the",
" // Closable interface and has a close() method. Use that",
" // method, if it's available, to free all resources",
" // associated with the class loader. DERBY-2162.",
" if (_newClassLoader instanceof Closeable) {",
" ((Closeable) _newClassLoader).close();",
" }",
"",
" _originalClassLoader = null;",
" _newClassLoader = null;",
" _resource = null;",
" }",
" /**",
" * Check whether this platform supports closing a {@code URLClassLoader}.",
" *",
" * @return {@code true} if {@code URLClassLoader} has a {@code close()}",
" * method (Java 7 and higher), or {@code false} otherwise",
" */",
" public static boolean supportsClose() {",
" return Closeable.class.isAssignableFrom(URLClassLoader.class);",
" }",
"}"
],
"header": "@@ -94,22 +98,42 @@ public class ClasspathSetup extends TestSetup",
"removed": [
" protected void tearDown()",
" new PrivilegedAction<Void>()",
" public Void run()",
" ",
" }",
"}"
]
}
]
}
] |
derby-DERBY-562-ff049ad7
|
fix of DERBY-562, committing patch for: Sunitha Kambhampati
This patch
- changes the error message thrown when the stream is either less or greater than the requested length to
'Input stream did not have exact amount of data as the requested length.'
- enhances the characterStreams.out test, to print out the nested sql exceptions to ensure that the proper error message is returned.
- updates to the master files.
I verified that we are testing for these two error cases (i.e. the stream has less or more data than the requested length) for the following supported stream-related APIs: setCharacterStream, setAsciiStream and setBinaryStream on PreparedStatement. Derby does not support the setUnicodeStream API, which is deprecated in JDBC 3.0.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@292830 13f79535-47bb-0310-9956-ffa450edef68
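A small JDBC sketch of the situation being tested here — a stream whose declared length does not match the data actually supplied — using a hypothetical database and table; the point is only to show where the new 'inexact length' error surfaces and how the nested SQLExceptions are walked:

    import java.io.StringReader;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    // Illustrative sketch: declare a length of 10 but supply only 5 characters.
    public class InexactStreamLength {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection(
                    "jdbc:derby:testdb;create=true");          // hypothetical db
            conn.createStatement().executeUpdate(
                    "CREATE TABLE T (C VARCHAR(20))");         // hypothetical table
            PreparedStatement ps =
                    conn.prepareStatement("INSERT INTO T VALUES (?)");
            ps.setCharacterStream(1, new StringReader("abcde"), 10);
            try {
                ps.executeUpdate();
            } catch (SQLException se) {
                // Print the chain of nested exceptions, as the updated
                // characterStreams test does.
                while (se != null) {
                    System.out.println(se.getSQLState() + ": " + se.getMessage());
                    se = se.getNextException();
                }
            }
            conn.close();
        }
    }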
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/RawToBinaryFormatStream.java",
"hunks": [
{
"added": [
"\t\t\tthrow new IOException(MessageService.getTextMessage(SQLState.SET_STREAM_INEXACT_LENGTH_DATA));"
],
"header": "@@ -79,7 +79,7 @@ class RawToBinaryFormatStream extends LimitInputStream {",
"removed": [
"\t\t\tthrow new IOException(MessageService.getTextMessage(SQLState.SET_STREAM_INSUFFICIENT_DATA));"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/ReaderToUTF8Stream.java",
"hunks": [
{
"added": [
"\t\t\tthrow new IOException(MessageService.getTextMessage(SQLState.SET_STREAM_INEXACT_LENGTH_DATA));"
],
"header": "@@ -160,7 +160,7 @@ final class ReaderToUTF8Stream",
"removed": [
"\t\t\tthrow new IOException(MessageService.getTextMessage(SQLState.SET_STREAM_INSUFFICIENT_DATA));"
]
}
]
}
] |
derby-DERBY-5620-ff005030
|
DERBY-5620: Replace illegal characters from test name when creating the failure folder
Replace all non-alphanumeric characters, except '-' and '_', with '_'. TestCase.getName
returning null is not dealt with, as all test cases should be given a name.
Patch file: derby-5620-2a-alphanum.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1291657 13f79535-47bb-0310-9956-ffa450edef68
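A standalone sketch of the sanitizing rule described above (keep letters, digits, '-' and '_'; map everything else to '_'); the class and method names here are hypothetical, but the loop is essentially the one added to TestConfiguration below:

    // Illustrative sketch of the DERBY-5620 file-name rule.
    public class FailureFolderName {
        static String sanitize(String testName) {
            char[] name = testName.toCharArray();
            for (int i = 0; i < name.length; i++) {
                char c = name[i];
                if (c != '-' && c != '_' && !Character.isLetterOrDigit(c)) {
                    name[i] = '_';
                }
            }
            return new String(name);
        }

        public static void main(String[] args) {
            // Prints "testFoo_1__my_Test_"
            System.out.println(sanitize("testFoo[1](my.Test)"));
        }
    }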
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java",
"hunks": [
{
"added": [
" // DERBY-5620: Ensure valid file name.",
" char[] tmpName = test.getName().toCharArray();",
" for (int i=0; i < tmpName.length; i++) {",
" switch (tmpName[i]) {",
" case '-':",
" case '_':",
" continue;",
" default:",
" if (!Character.isLetterOrDigit(tmpName[i])) {",
" tmpName[i] = '_';",
" }",
" }",
" }",
" sb.append(tmpName);"
],
"header": "@@ -1914,7 +1914,20 @@ public final class TestConfiguration {",
"removed": [
" sb.append(test.getName());"
]
}
]
}
] |
derby-DERBY-5622-49aa62a2
|
DERBY-5622: Reduce chance for hash collisions when changing boot passwords.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1356749 13f79535-47bb-0310-9956-ffa450edef68
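The fix verifies a regenerated encryption key by an encrypt/decrypt round trip over a known block, rather than trusting a short digest alone (see the vetCipherProviders hunk below). A minimal sketch of that round-trip idea using the JCE directly; the algorithm, all-zero key and IV here are illustrative stand-ins, not Derby's actual cipher setup:

    import java.util.Arrays;
    import javax.crypto.Cipher;
    import javax.crypto.spec.IvParameterSpec;
    import javax.crypto.spec.SecretKeySpec;

    // Illustrative sketch: encrypt a known block with the trusted key and
    // check that the candidate key decrypts it back to the same block.
    public class KeyRoundTripCheck {
        public static void main(String[] args) throws Exception {
            byte[] keyBytes = new byte[16];   // stand-in for the generated key
            byte[] iv = new byte[16];         // stand-in for the stored IV
            SecretKeySpec key = new SecretKeySpec(keyBytes, "AES");

            byte[] clearText = new byte[1024];
            for (int i = 0; i < clearText.length; i++) {
                clearText[i] = (byte) (i % 256);
            }

            Cipher enc = Cipher.getInstance("AES/CBC/NoPadding");
            enc.init(Cipher.ENCRYPT_MODE, key, new IvParameterSpec(iv));
            byte[] cipherText = enc.doFinal(clearText);

            Cipher dec = Cipher.getInstance("AES/CBC/NoPadding");
            dec.init(Cipher.DECRYPT_MODE, key, new IvParameterSpec(iv));
            byte[] roundTrip = dec.doFinal(cipherText);

            // A mismatch here would mean the candidate key is wrong.
            System.out.println("keys match: "
                    + Arrays.equals(clearText, roundTrip));
        }
    }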
|
[
{
"file": "java/engine/org/apache/derby/impl/services/jce/JCECipherFactory.java",
"hunks": [
{
"added": [
""
],
"header": "@@ -340,7 +340,7 @@ public final class JCECipherFactory implements CipherFactory, java.security.Priv",
"removed": [
"\t\t"
]
},
{
"added": [
" if (checkKey != verifyKey)",
" { throw StandardException.newException(errorState); }"
],
"header": "@@ -732,8 +732,8 @@ public final class JCECipherFactory implements CipherFactory, java.security.Priv",
"removed": [
"\t\tif (checkKey != verifyKey)",
"\t\t\tthrow StandardException.newException(errorState);"
]
},
{
"added": [
"\t\t{ throw StandardException.newException(SQLState.WRONG_BOOT_PASSWORD); }",
"",
" // DERBY-5622:",
" // if we survive those two quick checks, verify that the generated key is still correct",
" // by using it to decrypt something encrypted by the original generated key",
" CipherProvider newDecrypter = createNewCipher",
" ( DECRYPT, generateKey( generatedKey ), IV );",
" vetCipherProviders( newDecrypter, verify, SQLState.WRONG_BOOT_PASSWORD );",
" "
],
"header": "@@ -790,9 +790,15 @@ public final class JCECipherFactory implements CipherFactory, java.security.Priv",
"removed": [
"\t\t\tthrow StandardException.newException(SQLState.WRONG_BOOT_PASSWORD);",
"",
""
]
},
{
"added": [
" /**",
" * <p>",
" * Verify that a decrypter matches an encrypter. Raises an exception if they don't.",
" * The verification is performed by encrypting a block of text and checking that",
" * it decrypts to the same block.",
" * </p>",
" */",
" private void vetCipherProviders",
" ( CipherProvider decrypter, CipherProvider encrypter, String sqlState )",
" throws StandardException",
" {",
" int clearTextLength = 1024;",
" int byteSize = 256;",
" byte[] clearText = new byte[ clearTextLength ];",
" byte[] cipherText = new byte[ clearTextLength ];",
" byte[] unencryptedText = new byte[ clearTextLength ];",
"",
" for ( int i = 0; i < clearTextLength; i++ ) { clearText[ i ] = (byte) (i % byteSize); }",
"",
" int bytesEncrypted = encrypter.encrypt",
" ( clearText, 0, clearTextLength, cipherText, 0 );",
" int bytesDecrypted = decrypter.decrypt",
" ( cipherText, 0, bytesEncrypted, unencryptedText, 0 );",
"",
" if ( (bytesEncrypted != clearTextLength) || (bytesDecrypted != clearTextLength) )",
" {",
" throw StandardException.newException( sqlState );",
" }",
"",
" for ( int i = 0; i < clearTextLength; i++ )",
" {",
" if ( clearText[ i ] != unencryptedText[ i ] )",
" {",
" throw StandardException.newException( sqlState );",
" }",
" }",
" }",
""
],
"header": "@@ -803,6 +809,44 @@ public final class JCECipherFactory implements CipherFactory, java.security.Priv",
"removed": []
}
]
}
] |
derby-DERBY-5622-5c759ff9
|
DERBY-2687 store/encryptDatabase.sql fails intermittently with ClassNotFoundException, Log Corrupted
Patch derby-2687-2 + removed an additional unused line. This converts
encryptDatabase.sql to JUnit and also makes the test ignore a hash
collision case: the stored two byte digest of the secret key can in can
1/2**16 cases match the the digest of bogus key gotten by decoding the
encrypted key using the wrong bootpassword, thus allowing boot to
proceed using a wrong encryption key, leading to a boot crash (the
"boot issue").
Another issue (less likely) can still make the test fail (the
"bootpassword change issue"), but cf. the improvement issue DERBY-5622
which would resolve that.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1292084 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/Decorator.java",
"hunks": [
{
"added": [
" {",
" return encryptedDatabaseBpw(test, getBootPhrase(16));",
" }",
"",
" /**",
" * Decorate a set of tests to use an encrypted",
" * single use database. This is to run tests",
" * using encryption as a general test and",
" * not specific tests of how encryption is handled.",
" * E.g. tests of setting various URL attributes",
" * would be handled in a specific test.",
" * <BR>",
" * The database will use the default encryption",
" * algorithm.",
" * ",
" * @param test test to decorate",
" * @param bootPassword boot passphrase to use",
" * @return decorated tests",
" */",
" public static Test encryptedDatabaseBpw(Test test, String bootPassword)",
" attributes.setProperty(\"bootPassword\", bootPassword);",
""
],
"header": "@@ -56,17 +56,37 @@ public class Decorator {",
"removed": [
" attributes.setProperty(\"bootPassword\", getBootPhrase(16));",
" "
]
},
{
"added": [
" {",
" return encryptedDatabaseBpw(test, algorithm, getBootPhrase(16));",
" }",
"",
"",
" /**",
" * Decorate a set of tests to use an encrypted",
" * single use database. This is to run tests",
" * using encryption as a general test and",
" * not specific tests of how encryption is handled.",
" * E.g. tests of setting various URL attributes",
" * would be handled in a specific test.",
" * <BR>",
" * The database will use the specified encryption",
" * algorithm.",
" * ",
" * @param test test to decorate",
" * @param bootPassword boot passphrase to use",
" * @return decorated tests",
" */",
" public static Test encryptedDatabaseBpw(Test test,",
" final String algorithm,",
" String bootPassword)",
" attributes.setProperty(\"bootPassword\", bootPassword);",
""
],
"header": "@@ -85,15 +105,38 @@ public class Decorator {",
"removed": [
" attributes.setProperty(\"bootPassword\", getBootPhrase(64));",
" "
]
}
]
}
] |
derby-DERBY-5623-7adbb9a9
|
DERBY-5623: Loosen up synchronization in FileMonitor
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1292724 13f79535-47bb-0310-9956-ffa450edef68
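The rewrite below replaces FileMonitor's single shared PrivilegedExceptionAction (whose action/key fields forced every privileged call to synchronize on the monitor) with a fresh anonymous action per call. A minimal sketch of that per-call pattern, using a simple property read as a stand-in:

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    // Illustrative sketch: each call builds its own action, so there are no
    // shared mutable fields and no synchronization is needed.
    public class PerCallPrivilegedRead {
        static String getJVMProperty(final String key) {
            return AccessController.doPrivileged(new PrivilegedAction<String>() {
                public String run() {
                    return System.getProperty(key);
                }
            });
        }

        public static void main(String[] args) {
            System.out.println(getJVMProperty("java.version"));
        }
    }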
|
[
{
"file": "java/engine/org/apache/derby/impl/services/monitor/FileMonitor.java",
"hunks": [
{
"added": [],
"header": "@@ -21,7 +21,6 @@",
"removed": [
"import org.apache.derby.iapi.services.monitor.Monitor;"
]
},
{
"added": [
"import java.security.AccessController;",
"import java.security.PrivilegedAction;",
"import java.security.PrivilegedExceptionAction;",
""
],
"header": "@@ -33,8 +32,10 @@ import java.io.File;",
"removed": [
"import java.net.URL;",
"import java.util.Enumeration;"
]
},
{
"added": [
"public final class FileMonitor extends BaseMonitor"
],
"header": "@@ -43,7 +44,7 @@ import java.util.Properties;",
"removed": [
"public final class FileMonitor extends BaseMonitor implements java.security.PrivilegedExceptionAction"
]
},
{
"added": [
"\tfinal boolean initialize(final boolean lite)",
" // SECURITY PERMISSION - OP2, OP2a, OP2b",
" return ((Boolean) AccessController.doPrivileged(new PrivilegedAction() {",
" public Object run() {",
" return Boolean.valueOf(PBinitialize(lite));",
" }",
" })).booleanValue();",
"\tfinal Properties getDefaultModuleProperties() {",
" // SECURITY PERMISSION - IP1",
" return (Properties) AccessController.doPrivileged(",
" new PrivilegedAction() {",
" public Object run() {",
" return FileMonitor.super.getDefaultModuleProperties();",
" }",
" });",
"\tpublic final String getJVMProperty(final String key) {",
" // SECURITY PERMISSION - OP1",
" return (String) AccessController.doPrivileged(new PrivilegedAction() {",
" public Object run() {",
" return PBgetJVMProperty(key);",
" }",
" });",
"\tpublic synchronized final Thread getDaemonThread(",
" final Runnable task,",
" final String name,",
" final boolean setMinPriority) {",
" return (Thread) AccessController.doPrivileged(new PrivilegedAction() {",
" public Object run() {",
" try {",
" return FileMonitor.super.getDaemonThread(",
" task, name, setMinPriority);",
" } catch (IllegalThreadStateException e) {",
" // We may get an IllegalThreadStateException if all the",
" // previously running daemon threads have completed and the",
" // daemon group has been automatically destroyed. If that's",
" // what happened, create a new daemon group and try again.",
" if (daemonGroup != null && daemonGroup.isDestroyed()) {",
" daemonGroup = createDaemonGroup();",
" return FileMonitor.super.getDaemonThread(",
" task, name, setMinPriority);",
" } else {",
" throw e;",
" }",
" }",
" }",
" });",
" }",
"\tpublic final void setThreadPriority(final int priority) {",
" AccessController.doPrivileged(new PrivilegedAction() {",
" public Object run() {",
" FileMonitor.super.setThreadPriority(priority);",
" return null;",
" }",
" });",
"\tfinal InputStream applicationPropertiesStream()",
"\t\t\treturn (InputStream) AccessController.doPrivileged(",
" new PrivilegedExceptionAction() {",
" public Object run() throws IOException {",
" return PBapplicationPropertiesStream();",
" }",
" });"
],
"header": "@@ -190,87 +191,86 @@ public final class FileMonitor extends BaseMonitor implements java.security.Priv",
"removed": [
"\tprivate int action;",
"\tprivate String key3;",
"\tprivate Runnable task;",
"\tprivate int intValue;",
"",
"\tsynchronized final boolean initialize(boolean lite)",
"\t\taction = lite ? 0 : 1;",
"\t\ttry {",
"\t\t\tObject ret = java.security.AccessController.doPrivileged(this);",
"",
"\t\t\treturn ((Boolean) ret).booleanValue();",
" } catch (java.security.PrivilegedActionException pae) {",
"\t\t\tthrow (RuntimeException) pae.getException();",
"\t\t}",
"\tsynchronized final Properties getDefaultModuleProperties() {",
"\t\taction = 2;",
" \t\ttry {",
"\t\t\treturn (Properties) java.security.AccessController.doPrivileged(this);",
" } catch (java.security.PrivilegedActionException pae) {",
" throw (RuntimeException) pae.getException();",
" }",
"\tpublic synchronized final String getJVMProperty(String key) {",
"\t\ttry {",
"",
"\t\t\taction = 3;",
"\t\t\tkey3 = key;",
"\t\t\tString value = (String) java.security.AccessController.doPrivileged(this);",
"\t\t\tkey3 = null;",
"\t\t\treturn value;",
" } catch (java.security.PrivilegedActionException pae) {",
"\t\t\tthrow (RuntimeException) pae.getException();",
"\t\t}",
"\tpublic synchronized final Thread getDaemonThread(Runnable task, String name, boolean setMinPriority) {",
"",
"\t\taction = 4;",
"\t\tkey3 = name;",
"\t\tthis.task = task;",
"\t\tthis.intValue = setMinPriority ? 1 : 0;",
"",
"\t\ttry {",
"",
"\t\t\tThread t = (Thread) java.security.AccessController.doPrivileged(this);",
"",
"\t\t\tkey3 = null;",
"\t\t\ttask = null;",
"",
"\t\t\treturn t;",
" } catch (java.security.PrivilegedActionException pae) {",
"\t\t\tthrow (RuntimeException) pae.getException();",
"\t\t}",
"\t}",
"\tpublic synchronized final void setThreadPriority(int priority) {",
"\t\taction = 5;",
"\t\tintValue = priority;",
"\t\ttry {",
"\t\t\tjava.security.AccessController.doPrivileged(this);",
" } catch (java.security.PrivilegedActionException pae) {",
"\t\t\tthrow (RuntimeException) pae.getException();",
"\t\t}",
"\tsynchronized final InputStream applicationPropertiesStream()",
"\t\taction = 6;",
"\t\t\treturn (InputStream) java.security.AccessController.doPrivileged(this);"
]
},
{
"added": [],
"header": "@@ -278,49 +278,6 @@ public final class FileMonitor extends BaseMonitor implements java.security.Priv",
"removed": [
"",
"\tpublic synchronized final Object run() throws IOException {",
"\t\tswitch (action) {",
"\t\tcase 0:",
"\t\tcase 1:",
"\t\t\t// SECURITY PERMISSION - OP2, OP2a, OP2b",
"\t\t\treturn new Boolean(PBinitialize(action == 0));",
"\t\tcase 2: ",
"\t\t\t// SECURITY PERMISSION - IP1",
"\t\t\treturn super.getDefaultModuleProperties();",
"\t\tcase 3:",
"\t\t\t// SECURITY PERMISSION - OP1",
"\t\t\treturn PBgetJVMProperty(key3);",
"\t\tcase 4:",
" {",
" boolean setMinPriority = (intValue != 0);",
" try {",
" return super.getDaemonThread(task, key3, setMinPriority);",
" } catch (IllegalThreadStateException e) {",
" // We may get an IllegalThreadStateException if all the",
" // previously running daemon threads have completed and the",
" // daemon group has been automatically destroyed. If that's",
" // what has happened, create a new daemon group and try again.",
" if (daemonGroup != null && daemonGroup.isDestroyed()) {",
" daemonGroup = createDaemonGroup();",
" return super.getDaemonThread(task, key3, setMinPriority);",
" } else {",
" throw e;",
" }",
" }",
" }",
"\t\tcase 5:",
"\t\t\tsuper.setThreadPriority(intValue);",
"\t\t\treturn null;",
"\t\tcase 6:",
"\t\t\t// SECURITY PERMISSION - OP3",
"\t\t\treturn PBapplicationPropertiesStream();",
"",
"\t\tdefault:",
"\t\t\treturn null;",
"\t\t}",
"\t}",
""
]
}
]
}
] |
derby-DERBY-5624-056eff7c
|
DERBY-5624 System can run out of stack space while processing DropOnCommit requests.
Only run testDERBY_5624 in largedata on Windows until the Linux issue is resolved.
Currently this test fails on Linux with 1024 file descriptors per user.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1292595 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5624-89c0bc38
|
DERBY-5624 System can run out of stack space while processing DropOnCommit requests.
minor comment changes.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1292432 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5624-f63124fd
|
DERBY-5624 System can run out of stack space while processing DropOnCommit requests.
forgot to svn add this file to last checkin.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1292096 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5627-b9bd28c8
|
DERBY-5627: Remove unused methods from the UUID classes
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1293147 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/uuid/BasicUUID.java",
"hunks": [
{
"added": [],
"header": "@@ -21,7 +21,6 @@",
"removed": [
"import org.apache.derby.iapi.services.io.FormatIdUtil;"
]
},
{
"added": [],
"header": "@@ -76,40 +75,6 @@ public class BasicUUID implements UUID, Formatable",
"removed": [
"\t/**",
"\t\tConstructor only called by BasicUUIDFactory.",
"\t\tConstructs a UUID from the byte array representation",
"\t\tproduced by toByteArrayio.",
"\t\t@see BasicUUID#toByteArray",
"\t**/",
"\tpublic BasicUUID(byte[] b)",
"\t{",
"\t\tint lsequence = 0;",
"\t\tfor (int ix = 0; ix < 4; ix++)",
"\t\t{",
"\t\t\tlsequence = lsequence << 8;",
"\t\t\tlsequence = lsequence | (0xff & b[ix]);",
"\t\t}",
"",
"\t\tlong ltimemillis = 0;",
"\t\tfor (int ix = 4; ix < 10; ix++)",
"\t\t{",
"\t\t\tltimemillis = ltimemillis << 8;",
"\t\t\tltimemillis = ltimemillis | (0xff & b[ix]);",
"\t\t}",
"",
"\t\tlong linetaddr = 0;",
"\t\tfor (int ix = 10; ix < 16; ix++)",
"\t\t{",
"\t\t\tlinetaddr = linetaddr << 8;",
"\t\t\tlinetaddr = linetaddr | (0xff & b[ix]);",
"\t\t}",
"",
"\t\tsequence = lsequence;",
"\t\ttimemillis = ltimemillis;",
"\t\tmajorId = linetaddr;",
"\t}",
""
]
},
{
"added": [],
"header": "@@ -123,7 +88,6 @@ public class BasicUUID implements UUID, Formatable",
"removed": [
"\t\t// RESOLVE: write out the byte array instead?"
]
},
{
"added": [],
"header": "@@ -274,42 +238,6 @@ public class BasicUUID implements UUID, Formatable",
"removed": [
"\t/**",
"\t Store this UUID in a byte array. Arrange the bytes in the UUID",
"\t in the same order the code which stores a UUID in a string",
"\t does.",
"\t ",
"\t @see org.apache.derby.catalog.UUID#toByteArray",
"\t*/",
"\tpublic byte[] toByteArray()",
"\t{",
"\t\tbyte[] result = new byte[16];",
"",
"\t\tint lsequence = sequence; ",
"\t\tresult[0] = (byte)(lsequence >>> 24);",
"\t\tresult[1] = (byte)(lsequence >>> 16);",
"\t\tresult[2] = (byte)(lsequence >>> 8);",
"\t\tresult[3] = (byte)lsequence;",
"",
"\t\tlong ltimemillis = timemillis;",
"\t\tresult[4] = (byte)(ltimemillis >>> 40);",
"\t\tresult[5] = (byte)(ltimemillis >>> 32);",
"\t\tresult[6] = (byte)(ltimemillis >>> 24);",
"\t\tresult[7] = (byte)(ltimemillis >>> 16);",
" \t\tresult[8] = (byte)(ltimemillis >>> 8);",
"\t\tresult[9] = (byte)ltimemillis;",
"",
"\t\tlong linetaddr = majorId;",
"\t\tresult[10] = (byte)(linetaddr >>> 40);",
"\t\tresult[11] = (byte)(linetaddr >>> 32);",
"\t\tresult[12] = (byte)(linetaddr >>> 24);",
"\t\tresult[13] = (byte)(linetaddr >>> 16);",
"\t\tresult[14] = (byte)(linetaddr >>> 8);",
"\t\tresult[15] = (byte)linetaddr;",
"",
"\t\treturn result;",
"\t}",
""
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/unitTests/services/T_UUIDFactory.java",
"hunks": [
{
"added": [],
"header": "@@ -151,26 +151,5 @@ public class T_UUIDFactory extends T_Generic {",
"removed": [
"",
"\t\tbyte[] uuidByteArray = uuid.toByteArray();",
"\t\tUUID uuid_b = factory.recreateUUID(uuidByteArray);",
"\t\tif (!uuid_b.equals(uuid))",
"\t\t{",
"\t\t\t// Resolve: format this with a message factory",
"\t\t\tString badByteArrayString = \"\";",
"\t\t\tfor (int ix = 0; ix < 16; ix++)",
"\t\t\t{",
"\t\t\t\tbadByteArrayString +=",
"\t\t\t\t\tInteger.toHexString(0x00ff&uuidByteArray[ix])+\".\";",
"\t\t\t}",
"",
"\t\t\tString message = ",
"\t\t\t\t\"Conversion error: \"",
"\t\t\t\t+ uuidstring ",
"\t\t\t\t+ \" != \" ",
"\t\t\t\t+ badByteArrayString;",
"\t\t\tout.printlnWithHeader(message);",
"\t\t\tresultSoFar = false;",
"\t\t}"
]
}
]
}
] |
derby-DERBY-5630-a5a01336
|
DERBY-5630; intermittent test failure in store/lockTableVTI.sql
fixing up javadoc and some comments, adjusting the exception class thrown in some places
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1564635 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5631-9414ec95
|
DERBY-5631: Extend SecurityManagerSetup to add extra privileges to the set of default privileges (merge two policy files)
Added the capability to merge two policy files in SecurityManagerSetup. For now
the only merge allowed is to merge an additional policy resource with the
default test policy. The intended use-case is for tests requiring a few extra
permissions to be runnable in the test framework.
Refreshed class comment.
Made getDefaultPolicy() private.
Added a debug println stating which policy is installed by the decorator.
Patch file: derby-5631-1c-merge_policy_files.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1295436 13f79535-47bb-0310-9956-ffa450edef68
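The core of the merge (see mergePolicies below) is simply concatenating the two policy files into a new one. A stripped-down sketch with hypothetical file names, leaving out the privileged blocks and target-directory handling the real decorator needs:

    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    // Illustrative sketch: a merged policy is just the concatenation of the
    // two source policy files, written to a new file.
    public class MergePolicies {
        public static void main(String[] args) throws IOException {
            OutputStream out = new FileOutputStream("merged.policy");
            append("derby_tests.policy", out);       // hypothetical default policy
            append("extra_permissions.policy", out); // hypothetical extra grants
            out.close();
        }

        private static void append(String file, OutputStream out)
                throws IOException {
            InputStream in = new FileInputStream(file);
            byte[] buf = new byte[1024];
            int read;
            while ((read = in.read(buf)) != -1) {
                out.write(buf, 0, read);
            }
            in.close();
        }
    }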
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java",
"hunks": [
{
"added": [
"import java.io.IOException;",
"import java.io.InputStream;",
"import java.io.OutputStream;",
"import java.security.PrivilegedActionException;"
],
"header": "@@ -20,9 +20,13 @@",
"removed": []
},
{
"added": [
"import org.apache.derbyTesting.functionTests.util.PrivilegedFileOpsForTests;",
"",
" * Configures the wrapped test to be run with the specified security policy.",
" * <p>",
" * This setup class normally installs the default policy file. This can be",
" * overridden by specifying {@literal java.security.policy=<NONE>} (see",
" * {@linkplain #NO_POLICY}), and can also be overridden by installing a",
" * security manager explicitly before the default security manager is installed.",
" * <p>",
" * Individual tests/suites can be configured to be run without a security",
" * manager, with a specific policy file, or with a specific policy file merged",
" * with the default policy file. The last option is useful when you only need",
" * to extend the default policy with a few extra permissions to run a test.",
""
],
"header": "@@ -31,13 +35,23 @@ import junit.extensions.TestSetup;",
"removed": [
" * Setup for running Derby JUnit tests with the SecurityManager",
" * which is the default for tests.",
" *",
" "
]
},
{
"added": [
" /** An additional policy to install (may be {@code null}). */",
" private final String additionalPolicyResource;",
" public SecurityManagerSetup(Test test, String policyResource) {",
" this(test, policyResource, false);",
" }",
"",
" /**",
" * Installs a new security policy.",
" *",
" * @param test the test to wrap",
" * @param policyResource the policy to install",
" * @param mergePolicies if {@code false} the specified policy will be the",
" * only policy installed, if {@code true} the specified policy will be",
" * merged with the default test policy for the test framework",
" */",
" public SecurityManagerSetup(Test test, String policyResource,",
" boolean mergePolicies) {",
" super(test);",
" if (mergePolicies) {",
" // By choice, only support merging with the default test policy.",
" this.decoratorPolicyResource = getDefaultPolicy();",
" this.additionalPolicyResource = policyResource;",
" } else {",
" this.additionalPolicyResource = null;",
" }"
],
"header": "@@ -73,14 +87,36 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" public SecurityManagerSetup(Test test, String policyResource)",
" {",
" super(test);"
]
},
{
"added": [
" this(test, policyResource, false);"
],
"header": "@@ -91,9 +127,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\tsuper(test);",
"\t\tthis.decoratorPolicyResource = policyResource != null ?",
"\t\t policyResource : getDefaultPolicy();"
]
},
{
"added": [
" protected void setUp()",
" throws IOException {",
" String resource = getEffectivePolicyResource(",
" decoratorPolicyResource, additionalPolicyResource);",
" installSecurityManager(resource, decoratorSecurityManager);"
],
"header": "@@ -133,8 +167,11 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\tprotected void setUp() {",
"\t\tinstallSecurityManager(decoratorPolicyResource, decoratorSecurityManager);"
]
},
{
"added": [
" private static String getDefaultPolicy()"
],
"header": "@@ -150,7 +187,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" public static String getDefaultPolicy()"
]
},
{
"added": [
" println(\"installed policy \" + policyFile);"
],
"header": "@@ -221,7 +258,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
""
]
},
{
"added": [
" /**",
" * Returns the location of the effective policy resource.",
" * <p>",
" * If two valid policy resources from different locations are specified,",
" * they will be merged into one policy file.",
" *",
" * @param policy1 first policy",
" * @param policy2 second policy (may be {@code null})",
" * @return The location of a policy resource.",
" * @throws IOException if reading or writing a policy resource fails",
" */",
" private static String getEffectivePolicyResource(String policy1,",
" String policy2)",
" throws IOException {",
" URL url1 = BaseTestCase.getTestResource(policy1);",
" String resource = url1.toExternalForm();",
" if (policy2 != null) {",
" URL url2 = BaseTestCase.getTestResource(policy2);",
" // Don't use URL.equals - it blocks and goes onto the network.",
" if (!url1.toExternalForm().equals(url2.toExternalForm())) {",
" resource = mergePolicies(url1, url2);",
" }",
" }",
" return resource;",
" }",
"",
" /**",
" * Merges the two specified policy resources (typically files), and writes",
" * the combined policy to a new file.",
" *",
" * @param policy1 the first policy",
" * @param policy2 the second policy",
" * @return The resource location string for a policy file.",
" * @throws IOException if reading or writing to one of the resources fails",
" */",
" private static String mergePolicies(URL policy1, URL policy2)",
" throws IOException {",
" // Create target directory for the merged policy files.",
" String sytemHome =",
" BaseTestCase.getSystemProperty(\"derby.system.home\");",
" File sysDir = new File(sytemHome == null ? \"system\" : sytemHome);",
" File varDir = new File(sysDir, \"var\");",
" // If running as the first test the system directory may not exist.",
" // This situation looks a little bit suspicious - investigate?",
" mkdir(sysDir);",
" mkdir(varDir);",
"",
" // Read the contents of both policy files and write them out to",
" // a new policy file. Construct a somewhat informative file name.",
" File mergedPF = new File(varDir,",
" new File(policy2.getPath()).getName() +",
" \"-MERGED_WITH-\" +",
" new File(policy1.getPath()).getName());",
" OutputStream o =",
" PrivilegedFileOpsForTests.getFileOutputStream(mergedPF);",
" byte[] buf = new byte[1024];",
" int read;",
" InputStream i1 = openStream(policy1);",
" while ((read = i1.read(buf)) != -1) {",
" o.write(buf, 0, read);",
" }",
" i1.close();",
" InputStream i2 = openStream(policy2);",
" while ((read = i2.read(buf)) != -1) {",
" o.write(buf, 0, read);",
" }",
" i2.close();",
" o.close();",
" return mergedPF.toURI().toURL().toExternalForm();",
" }",
"",
" /** Opens the resource stream in a privileged block. */",
" private static InputStream openStream(final URL resource)",
" throws IOException {",
" try {",
" return (InputStream)AccessController.doPrivileged(",
" new java.security.PrivilegedExceptionAction(){",
" public Object run() throws IOException {",
" return resource.openStream();",
" }",
" }",
" );",
" } catch (PrivilegedActionException pae) {",
" throw (IOException)pae.getException();",
" }",
" }",
"",
" /** Creates the specified directory if it doesn't exist. */",
" private static void mkdir(final File dir) {",
" AccessController.doPrivileged(",
" new java.security.PrivilegedAction(){",
" public Object run(){",
" if (!dir.exists() && !dir.mkdir()) {",
" fail(\"failed to create directory: \" + dir.getPath());",
" }",
" return null;",
" }",
" }",
" );",
" }",
"",
" /** Prints a debug message if debugging is enabled. */",
" private static void println(String msg) {",
" BaseTestCase.println(\"{SecurityManagerSetup} \" + msg);",
" }"
],
"header": "@@ -440,4 +477,109 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": []
}
]
}
] |
derby-DERBY-5631-b0ec63db
|
DERBY-5631: Extend SecurityManagerSetup to add extra privileges to the set of default privileges (merge two policy files)
Added missing privileged block in conversion from File -> URI -> URL -> String.
Replaced a block of code with an existing utility method.
Patch file: derby-5631-1f-merge_policy_files_fix-priv.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1298787 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java",
"hunks": [
{
"added": [
"import java.security.PrivilegedAction;",
"import java.security.PrivilegedExceptionAction;"
],
"header": "@@ -27,7 +27,9 @@ import java.net.MalformedURLException;",
"removed": []
},
{
"added": [
" AccessController.doPrivileged(new PrivilegedAction() {"
],
"header": "@@ -247,7 +249,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\tAccessController.doPrivileged(new java.security.PrivilegedAction() {"
]
},
{
"added": [
" try {",
" URL policyURL = getResourceURL(policyResource);",
" set.setProperty(\"java.security.policy\", policyURL.toExternalForm());",
" } catch (MalformedURLException mue) {",
" BaseTestCase.alarm(\"Unreadable policy URL: \" + policyResource);"
],
"header": "@@ -269,21 +271,13 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\tURL policyURL = BaseTestCase.getTestResource(policyResource);",
"",
"\t\t// maybe the passed in resource was an URL to begin with",
"\t\tif ( policyURL == null )",
"\t\t{",
"\t\t\ttry { policyURL = new URL( policyResource ); }",
"\t\t\tcatch (Exception e) { System.out.println( \"Unreadable url: \" + policyResource ); }",
"\t\t}",
"",
"\t\tif (policyURL != null) {",
"\t\t\tset.setProperty(\"java.security.policy\",",
"\t\t\t\t\tpolicyURL.toExternalForm());",
""
]
},
{
"added": [
" AccessController.doPrivileged(new PrivilegedAction() {"
],
"header": "@@ -442,7 +436,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\t AccessController.doPrivileged(new java.security.PrivilegedAction() {"
]
},
{
"added": [
" new PrivilegedAction()"
],
"header": "@@ -467,7 +461,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" new java.security.PrivilegedAction()"
]
},
{
"added": [
" final File mergedPF = new File(varDir,"
],
"header": "@@ -548,7 +542,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" File mergedPF = new File(varDir,"
]
},
{
"added": [
" try {",
" return (String)",
" AccessController.doPrivileged(new PrivilegedExceptionAction() {",
" public Object run() throws MalformedURLException {",
" return mergedPF.toURI().toURL().toExternalForm();",
" }",
" });",
" } catch (PrivilegedActionException pae) {",
" throw (MalformedURLException)pae.getException();",
" }"
],
"header": "@@ -567,7 +561,16 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" return mergedPF.toURI().toURL().toExternalForm();"
]
},
{
"added": [
" new PrivilegedExceptionAction(){"
],
"header": "@@ -575,7 +578,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" new java.security.PrivilegedExceptionAction(){"
]
},
{
"added": [
" new PrivilegedAction(){"
],
"header": "@@ -589,7 +592,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" new java.security.PrivilegedAction(){"
]
}
]
}
] |
derby-DERBY-5631-e1383165
|
DERBY-5631: Extend SecurityManagerSetup to add extra privileges to the set of default privileges (merge two policy files)
The previous commit for this issue (revision 1295436) broke all test-cases
using no security policy ("<NONE>").
Fixed handling of NO_POLICY.
Patch file: derby-5631-1d-merge_policy_files-fix.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1295507 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java",
"hunks": [
{
"added": [
" * @return The location of a policy resource, or {@linkplain #NO_POLICY}.",
" String resource = policy1;",
" if (!NO_POLICY.equals(resource)) {",
" URL url1 = BaseTestCase.getTestResource(policy1);",
" resource = url1.toExternalForm();",
" if (policy2 != null) {",
" URL url2 = BaseTestCase.getTestResource(policy2);",
" // Don't use URL.equals - it blocks and goes onto the network.",
" if (!url1.toExternalForm().equals(url2.toExternalForm())) {",
" resource = mergePolicies(url1, url2);",
" }"
],
"header": "@@ -485,19 +485,22 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" * @return The location of a policy resource.",
" URL url1 = BaseTestCase.getTestResource(policy1);",
" String resource = url1.toExternalForm();",
" if (policy2 != null) {",
" URL url2 = BaseTestCase.getTestResource(policy2);",
" // Don't use URL.equals - it blocks and goes onto the network.",
" if (!url1.toExternalForm().equals(url2.toExternalForm())) {",
" resource = mergePolicies(url1, url2);"
]
}
]
}
] |
derby-DERBY-5631-e6ffa2a1
|
DERBY-5631: Extend SecurityManagerSetup to add extra privileges to the set of default privileges (merge two policy files)
Introduced constant NO_POLICY for "<NONE>".
Corrected class name in license.
Fixed typo.
Patch file: derby-5631-2a-introduce_NO_POLICY_constant.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1294088 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java",
"hunks": [
{
"added": [
" * Derby - Class org.apache.derbyTesting.junit.SecurityManagerSetup"
],
"header": "@@ -1,6 +1,6 @@",
"removed": [
" * Derby - Class org.apache.derbyTesting.functionTests.util.SecurityManagerSetup"
]
},
{
"added": [
" /** Constant used to indicate that no security policy is to be installed. */",
" static final String NO_POLICY = \"<NONE>\";",
""
],
"header": "@@ -38,6 +38,9 @@ import junit.framework.TestSuite;",
"removed": []
},
{
"added": [
" return new SecurityManagerSetup(test, NO_POLICY);"
],
"header": "@@ -114,7 +117,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\treturn new SecurityManagerSetup(test, \"<NONE>\");"
]
},
{
"added": [
" installSecurityManager(NO_POLICY);",
" * Install specific policy file with the security manager"
],
"header": "@@ -123,11 +126,11 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\tinstallSecurityManager(\"<NONE>\");",
"\t * Install specific polciy file with the security manager"
]
},
{
"added": [
" if (NO_POLICY.equals(decoratorPolicyResource))"
],
"header": "@@ -136,7 +139,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" if (\"<NONE>\".equals(decoratorPolicyResource))"
]
},
{
"added": [
" if (NO_POLICY.equals(set.getProperty(\"java.security.policy\")))"
],
"header": "@@ -202,7 +205,7 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
"\t\tif (\"<NONE>\".equals(set.getProperty(\"java.security.policy\")))"
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java",
"hunks": [
{
"added": [
" if (SecurityManagerSetup.NO_POLICY.equals("
],
"header": "@@ -2012,7 +2012,7 @@ public final class TestConfiguration {",
"removed": [
" if (\"<NONE>\".equals("
]
}
]
}
] |
derby-DERBY-5631-ed505d03
|
DERBY-5631: Extend SecurityManagerSetup to add extra privileges to the set of default privileges (merge two policy files)
Deal with errors in the URL handling.
Patch file: derby-5631-1e-merge_policy_files-fix-url.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1295609 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java",
"hunks": [
{
"added": [
"import java.net.MalformedURLException;"
],
"header": "@@ -23,6 +23,7 @@ import java.io.File;",
"removed": []
},
{
"added": [
" URL url1 = getResourceURL(policy1);",
" URL url2 = getResourceURL(policy2);"
],
"header": "@@ -493,10 +494,10 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": [
" URL url1 = BaseTestCase.getTestResource(policy1);",
" URL url2 = BaseTestCase.getTestResource(policy2);"
]
},
{
"added": [
" /**",
" * Returns a URL for the given policy resource.",
" *",
" * @param policy the policy resource",
" * @return A {@code URL} denoting the policy resource.",
" * @throws MalformedURLException if the resource string not a valid URL",
" */",
" private static URL getResourceURL(final String policy)",
" throws MalformedURLException {",
" URL url = BaseTestCase.getTestResource(policy);",
" if (url == null) {",
" // Assume the policy is expressed as an URL already, probably",
" // as a file.",
" url = new URL(policy);",
" }",
" return url; ",
" }",
""
],
"header": "@@ -506,6 +507,24 @@ public final class SecurityManagerSetup extends TestSetup {",
"removed": []
}
]
}
] |
derby-DERBY-5632-fa87f1c0
|
DERBY-5632: Logical deadlock happened when freezing/unfreezing the database
Stop using explicit synchronization on the conglomerate cache.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1456352 13f79535-47bb-0310-9956-ffa450edef68
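The hunks below remove the explicit synchronized (conglom_cache) blocks and let the cache service do its own per-entry locking via find/release. A rough sketch of the general idea — delegating locking to a thread-safe cache instead of wrapping every lookup in an external lock — using a plain ConcurrentHashMap rather than Derby's CacheManager:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Illustrative sketch: the cache does its own per-entry locking, so
    // callers no longer need a synchronized block around the lookup.
    public class NoExternalCacheLock {
        private final ConcurrentMap<Long, String> cache =
                new ConcurrentHashMap<>();

        String find(long id) {
            // The entry is loaded at most once per key, and no caller holds
            // a lock over the whole cache while that happens.
            return cache.computeIfAbsent(id, key -> "conglomerate-" + key);
        }

        public static void main(String[] args) {
            System.out.println(new NoExternalCacheLock().find(42L));
        }
    }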
|
[
{
"file": "java/engine/org/apache/derby/impl/store/access/CacheableConglomerate.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.store.raw.ContainerKey;"
],
"header": "@@ -25,6 +25,7 @@ import org.apache.derby.iapi.services.cache.Cacheable;",
"removed": []
},
{
"added": [
" private final RAMAccessManager accessManager;",
" CacheableConglomerate(RAMAccessManager parent)",
" this.accessManager = parent;"
],
"header": "@@ -44,12 +45,14 @@ created.",
"removed": [
" CacheableConglomerate()"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/access/RAMAccessManager.java",
"hunks": [
{
"added": [],
"header": "@@ -52,7 +52,6 @@ import org.apache.derby.iapi.store.access.TransactionController;",
"removed": [
"import org.apache.derby.iapi.store.raw.ContainerKey;"
]
},
{
"added": [
" ConglomerateFactory getFactoryFromConglomId("
],
"header": "@@ -371,7 +370,7 @@ public abstract class RAMAccessManager",
"removed": [
" private ConglomerateFactory getFactoryFromConglomId("
]
},
{
"added": [
" CacheableConglomerate cache_entry =",
" (CacheableConglomerate) conglom_cache.find(conglomid_obj);",
" if (cache_entry != null) {",
" conglom = cache_entry.getConglom();",
" conglom_cache.release(cache_entry);"
],
"header": "@@ -464,36 +463,12 @@ public abstract class RAMAccessManager",
"removed": [
" synchronized (conglom_cache)",
" {",
" CacheableConglomerate cache_entry = ",
" (CacheableConglomerate) conglom_cache.findCached(conglomid_obj);",
"",
" if (cache_entry != null)",
" {",
" conglom = cache_entry.getConglom();",
" conglom_cache.release(cache_entry);",
"",
" // SanityManager.DEBUG_PRINT(\"find\", \"find hit : \" + conglomid);",
" }",
" else",
" {",
" // SanityManager.DEBUG_PRINT(\"find\", \"find miss: \" + conglomid);",
" // If not in cache - ask the factory for it and insert it.",
"",
" conglom = ",
" getFactoryFromConglomId(conglomid).readConglomerate(",
" xact_mgr, new ContainerKey(0, conglomid));",
"",
" if (conglom != null)",
" {",
" // on cache miss, put the missing conglom in the cache.",
" cache_entry = (CacheableConglomerate) ",
" this.conglom_cache.create(conglomid_obj, conglom);",
" this.conglom_cache.release(cache_entry);",
" }",
" }"
]
},
{
"added": [
" conglom_cache.ageOut();"
],
"header": "@@ -512,49 +487,7 @@ public abstract class RAMAccessManager",
"removed": [
" synchronized (conglom_cache)",
" {",
" conglom_cache.ageOut();",
" }",
"",
" return;",
" }",
"",
" /**",
" * Update a conglomerate directory entry.",
" * <p>",
" * Update the Conglom column of the Conglomerate Directory. The ",
" * Conglomerate with id \"conglomid\" is replaced by \"new_conglom\".",
" * <p>",
" *",
" * @param conglomid The conglomid of conglomerate to replace.",
" * @param new_conglom The new Conglom to update the conglom column to.",
" *",
"\t * @exception StandardException Standard exception policy.",
" **/",
" /* package */ void conglomCacheUpdateEntry(",
" long conglomid, ",
" Conglomerate new_conglom) ",
" throws StandardException",
" {",
" Long conglomid_obj = new Long(conglomid);",
"",
" synchronized (conglom_cache)",
" {",
" // remove the current entry",
" CacheableConglomerate conglom_entry = (CacheableConglomerate) ",
" conglom_cache.findCached(conglomid_obj);",
"",
" if (conglom_entry != null)",
" conglom_cache.remove(conglom_entry);",
"",
" // insert the updated entry.",
" conglom_entry = (CacheableConglomerate) ",
" conglom_cache.create(conglomid_obj, new_conglom);",
" conglom_cache.release(conglom_entry);",
" }",
"",
" return;"
]
},
{
"added": [
" // Insert the new entry.",
" CacheableConglomerate conglom_entry = (CacheableConglomerate)",
" conglom_cache.create(new Long(conglomid), conglom);",
" conglom_cache.release(conglom_entry);"
],
"header": "@@ -571,15 +504,10 @@ public abstract class RAMAccessManager",
"removed": [
" synchronized (conglom_cache)",
" {",
" // insert the updated entry.",
" CacheableConglomerate conglom_entry = (CacheableConglomerate) ",
" conglom_cache.create(new Long(conglomid), conglom);",
" conglom_cache.release(conglom_entry);",
" }",
"",
" return;"
]
},
{
"added": [
" CacheableConglomerate conglom_entry = (CacheableConglomerate)",
" conglom_cache.findCached(new Long(conglomid));",
" if (conglom_entry != null) {",
" conglom_cache.remove(conglom_entry);",
" /**",
" * <p>",
" * Get the current transaction context.",
" * </p>",
" *",
" * <p>",
" * If there is an internal transaction on the context stack, return the",
" * internal transaction. Otherwise, if there is a nested user transaction",
" * on the context stack, return the nested transaction. Otherwise,",
" * return the current user transaction.",
" * </p>",
" *",
" * @return a context object referencing the current transaction",
" */",
" RAMTransactionContext getCurrentTransactionContext() {",
" RAMTransactionContext rtc =",
" (RAMTransactionContext) ContextService.getContext(",
" AccessFactoryGlobals.RAMXACT_INTERNAL_CONTEXT_ID);",
"",
" if (rtc == null) {",
" rtc = (RAMTransactionContext) ContextService.getContext(",
" AccessFactoryGlobals.RAMXACT_CHILD_CONTEXT_ID);",
" }",
"",
" if (rtc == null) {",
" rtc = (RAMTransactionContext) ContextService.getContext(",
" AccessFactoryGlobals.RAMXACT_CONTEXT_ID);",
" }",
" return rtc;",
" }"
],
"header": "@@ -593,19 +521,45 @@ public abstract class RAMAccessManager",
"removed": [
" synchronized (conglom_cache)",
" {",
" CacheableConglomerate conglom_entry = (CacheableConglomerate) ",
" conglom_cache.findCached(new Long(conglomid));",
" if (conglom_entry != null)",
" conglom_cache.remove(conglom_entry);",
"",
" return;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/access/RAMTransaction.java",
"hunks": [
{
"added": [
"\t\tConglomerate conglom = findConglomerate(conglomId);"
],
"header": "@@ -382,17 +382,7 @@ public class RAMTransaction",
"removed": [
"\t\tConglomerate conglom = null;",
"",
"\t\tif (conglomId < 0)",
"\t\t{",
"\t\t\tif (tempCongloms != null)",
"\t\t\t\tconglom = (Conglomerate) tempCongloms.get(new Long(conglomId));",
"\t\t}",
" else",
" {",
" conglom = accessmanager.conglomCacheFind(this, conglomId);",
" }"
]
},
{
"added": [
" // Set an indication that ALTER TABLE has been called so that the",
" // conglomerate will be invalidated if an error happens. Only needed",
" // for non-temporary conglomerates, since they are the only ones that",
" // live in the conglomerate cache.",
" if (!is_temporary)"
],
"header": "@@ -617,18 +607,13 @@ public class RAMTransaction",
"removed": [
" // remove the old entry in the Conglomerate directory, and add the",
" // new one.",
"\t\tif (is_temporary)",
"\t\t\ttempCongloms.put(new Long(conglomId), conglom);",
"\t\t}",
"\t\telse",
" {",
"",
" // have access manager update the conglom to this new one.",
"\t\t\taccessmanager.conglomCacheUpdateEntry(conglomId, conglom);"
]
},
{
"added": [
" invalidateConglomerateCache();"
],
"header": "@@ -1971,12 +1956,7 @@ public class RAMTransaction",
"removed": [
"\t",
" if (alterTableCallMade)",
" {",
" accessmanager.conglomCacheInvalidate();",
" alterTableCallMade = false;",
" }"
]
}
]
}
] |
derby-DERBY-5635-ca998af8
|
DERBY-5635: Provide implementation for getMetaData() in VTITemplate
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1297396 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/vti/StringColumnVTI.java",
"hunks": [
{
"added": [],
"header": "@@ -28,7 +28,6 @@ import java.math.BigDecimal;",
"removed": [
"import java.sql.ResultSetMetaData;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/vti/VTITemplate.java",
"hunks": [
{
"added": [],
"header": "@@ -21,23 +21,9 @@",
"removed": [
"import java.io.InputStream;",
"",
"import java.sql.Connection;",
"import java.sql.Statement;",
"import java.sql.ResultSet;",
"import java.sql.ResultSetMetaData;",
"import java.sql.SQLWarning;",
"import java.net.URL;",
"import java.util.Calendar;",
"import java.sql.Ref;",
"import java.sql.Blob;",
"import java.sql.Clob;",
"import java.sql.Array;",
""
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/functionTests/util/SampleVTI.java",
"hunks": [
{
"added": [],
"header": "@@ -23,7 +23,6 @@ package org.apache.derbyTesting.functionTests.util;",
"removed": [
"import java.sql.ResultSetMetaData;"
]
},
{
"added": [],
"header": "@@ -88,11 +87,6 @@ public class SampleVTI",
"removed": [
" //@Override",
" public ResultSetMetaData getMetaData() throws SQLException {",
" throw new UnsupportedOperationException(\"Not supported yet.\");",
" }",
""
]
}
]
}
] |
derby-DERBY-5638-3ce7ebd1
|
DERBY-5638 intermittent test failure in test_05_ClobNegative when running full largedata._Suite; LobLimitsTestjava.sql.SQLException: Table/View 'BLOBTBL' already exists in Schema 'APP'.
Adding a new test fixture to LobLimitsTest.java which will shut down the databases. This test fixture will be the last one to get executed in the suite since the tests are run in order in this particular suite. This shutdown will ensure that all the logs are applied to the database and hence there are no unapplied log files left at the end of the suite. The large data suite deals with large data objects, which can cause us to have large log files, and if the database is not shut down at the end of the suite, the suite will finish successfully but will leave a database directory with a large number of big log files. Nightly machines which run this suite on a regular basis can eventually run out of disk space if those machines do not delete the database directories from multiple runs.
I ran the large data suite on my machine and it ran with no problems. I checked the size of the database wombat after the suite finished, and the size of the db was much smaller because of the addition of the shutdown-databases fixture.
Another change made as part of the code changes is to do a TRUNCATE TABLE after doing DELETE FROM the table. TRUNCATE TABLE will avoid post-commit work and shouldn't cause any concurrent locking to happen. We do not want to remove the DELETE FROM though, because it exercises a separate code path in Derby compared to TRUNCATE.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1303630 13f79535-47bb-0310-9956-ffa450edef68
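A minimal JDBC sketch of the delete-then-truncate pattern described above (the connection URL and table name are taken from the test context mentioned in the message and are illustrative only):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    // Illustrative sketch: DELETE exercises the delete code path, and the
    // following TRUNCATE avoids post-commit work on the emptied table.
    public class DeleteThenTruncate {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:derby:wombat");
            Statement s = conn.createStatement();
            s.executeUpdate("DELETE FROM BLOBTBL");
            s.executeUpdate("TRUNCATE TABLE BLOBTBL");
            s.close();
            conn.close();
        }
    }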
|
[] |
derby-DERBY-5641-92fee619
|
DERBY-5641: Remove unused BaseDataFileFactory methods plus minor cleanups
Removed methods syncSideLog, pageToDirty and getTempDirectory.
Additional cleanups:
o removed unused imports
o removed unnecessary return statement
o made synchronization object freezeSemaphore final
o removed unused instance variable backupPath
o removed unused local variable (and exception instantiation)
multipleJBMSWarning
o replaced new Boolean with Boolean.valueOf
Patch file: derby-5641-1a-unused_methods_plus_cleanup.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1298709 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java",
"hunks": [
{
"added": [],
"header": "@@ -82,12 +82,10 @@ import java.util.Hashtable;",
"removed": [
"import java.io.FileNotFoundException;",
"import java.security.AccessControlException;"
]
},
{
"added": [
"\tprivate final Object freezeSemaphore = new Object();"
],
"header": "@@ -150,7 +148,7 @@ public class BaseDataFileFactory",
"removed": [
"\tprivate Object freezeSemaphore;"
]
},
{
"added": [],
"header": "@@ -203,7 +201,6 @@ public class BaseDataFileFactory",
"removed": [
" private static final int GET_TEMP_DIRECTORY_ACTION = 1;"
]
},
{
"added": [],
"header": "@@ -225,7 +222,6 @@ public class BaseDataFileFactory",
"removed": [
" private String backupPath;"
]
},
{
"added": [],
"header": "@@ -435,8 +431,6 @@ public class BaseDataFileFactory",
"removed": [
"\t\tfreezeSemaphore = new Object();",
""
]
},
{
"added": [],
"header": "@@ -1401,20 +1395,6 @@ public class BaseDataFileFactory",
"removed": [
"\t/**",
"\t\tAsk the log factory to flush the side log up to this bip location",
"\t\tNot implemented in this class - subclass who deals with side log must",
"\t\toverride this.",
"",
"\t\t@exception StandardException Derby Standard Error Policy",
"\t*/",
"\tprivate void syncSideLog(long bipLocation)",
"\t\t throws StandardException",
"\t{",
"\t\treturn;",
"\t}",
"",
""
]
},
{
"added": [],
"header": "@@ -1464,18 +1444,6 @@ public class BaseDataFileFactory",
"removed": [
"\t/**",
"\t *\tThis page is going from clean to dirty, this is a chance for the",
"\t *\tsub class to do something if so desired",
"\t *",
"\t * @exception StandardException Standard Derby Error Policy",
"\t */",
"\tprivate void pageToDirty(RawTransaction t, StoredPage page)",
"\t\t throws StandardException",
"\t{",
"\t\treturn;\t\t\t\t\t// this implementation does nothing",
"\t}",
""
]
},
{
"added": [],
"header": "@@ -1501,20 +1469,6 @@ public class BaseDataFileFactory",
"removed": [
" synchronized StorageFile getTempDirectory()",
" {",
" actionCode = GET_TEMP_DIRECTORY_ACTION;",
" try",
" {",
" return (StorageFile) AccessController.doPrivileged( this);",
" }",
" catch (PrivilegedActionException pae)",
" { ",
" // getTempDirectory does not actually throw an exception",
" return null;",
" } ",
" }",
" "
]
},
{
"added": [],
"header": "@@ -1980,10 +1934,6 @@ public class BaseDataFileFactory",
"removed": [
" StandardException multipleJBMSWarning =",
" StandardException.newException(",
" SQLState.DATA_MULTIPLE_JBMS_WARNING, args);",
""
]
},
{
"added": [],
"header": "@@ -2092,8 +2042,6 @@ public class BaseDataFileFactory",
"removed": [
"",
" return;"
]
},
{
"added": [
" return Boolean.valueOf(bsegdir.exists());"
],
"header": "@@ -2591,7 +2539,7 @@ public class BaseDataFileFactory",
"removed": [
" return new Boolean(bsegdir.exists());"
]
},
{
"added": [
" return Boolean.valueOf(bsegdir.isDirectory());"
],
"header": "@@ -2600,7 +2548,7 @@ public class BaseDataFileFactory",
"removed": [
" return new Boolean(bsegdir.isDirectory());"
]
},
{
"added": [],
"header": "@@ -2628,7 +2576,6 @@ public class BaseDataFileFactory",
"removed": [
" this.backupPath = backupPath;"
]
},
{
"added": [],
"header": "@@ -2641,7 +2588,6 @@ public class BaseDataFileFactory",
"removed": [
" this.backupPath = null;"
]
},
{
"added": [],
"header": "@@ -2744,9 +2690,6 @@ public class BaseDataFileFactory",
"removed": [
" case GET_TEMP_DIRECTORY_ACTION:",
" return storageFactory.getTempDir();",
""
]
}
]
}
] |
derby-DERBY-5642-b2dc02ce
|
DERBY-5642: OutOfMemoryError in OCRecoveryTest on phoneME
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1298765 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5643-77fbd65a
|
DERBY-5643: Occasional hangs in replication tests on Linux
Make sure the forked processes have terminated completely before
starting the next test case.
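As a rough sketch of the idea (not the actual test code; proc is a hypothetical java.lang.Process handle for a forked JVM):

    public class ForkedProcessSketch {
        // Block until the forked JVM has exited so the next test case
        // starts from a clean slate.
        static int waitForTermination(Process proc) throws InterruptedException {
            int exitCode = proc.waitFor();
            return exitCode;
        }
    }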
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1299573 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-5643-7d6ee2f4
|
DERBY-5643: Occasional hangs in replication tests on Linux
Increase startup timeout to 4 minutes in NetworkServerTestSetup to work
around http://bugs.sun.com/view_bug.do?bug_id=6483406. Make replication
tests and compatibility tests use NetworkServerTestSetup's helper methods
to check if the server is up.
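For illustration only, a rough sketch of the kind of ping loop such helper methods implement, built on the public NetworkServerControl.ping() API; the host, port, sleep interval and timeout below are illustrative values, not the ones NetworkServerTestSetup actually uses:

    import java.net.InetAddress;
    import org.apache.derby.drda.NetworkServerControl;

    public class PingSketch {
        // Poll the server until ping() succeeds or the timeout expires.
        static boolean waitForServer(long timeoutMillis) throws Exception {
            NetworkServerControl control = new NetworkServerControl(
                    InetAddress.getByName("localhost"), 1527);
            long start = System.currentTimeMillis();
            while (System.currentTimeMillis() - start < timeoutMillis) {
                try {
                    control.ping();        // throws if the server is not up yet
                    return true;
                } catch (Exception e) {
                    Thread.sleep(100);     // brief pause before the next attempt
                }
            }
            return false;
        }
    }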
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1300475 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/NetworkServerTestSetup.java",
"hunks": [
{
"added": [
" /**",
" * <p>",
" * Setting maximum wait time to 4 minutes by default. On some platforms",
" * it may take this long to start the server. See for example",
" * <a href=\"http://bugs.sun.com/view_bug.do?bug_id=6483406\">this JVM",
" * bug</a> that sometimes makes server startup take more than 3 minutes.",
" * </p>",
" *",
" * <p>",
" * Increasing the wait time should not adversely affect those",
" * </p>",
" private static final long DEFAULT_WAIT_TIME = 240000;",
" /** Sleep for 100 ms before pinging the network server (again) */"
],
"header": "@@ -43,19 +43,27 @@ import org.apache.derby.drda.NetworkServerControl;",
"removed": [
" /** Setting maximum wait time to 40 seconds by default. On some platforms",
" * it may take this long to start the server. Increasing the wait",
" * time should not adversely affect those",
" private static final long DEFAULT_WAIT_TIME = 40000;",
" /** Sleep for 500 ms before pinging the network server (again) */"
]
},
{
"added": [
" long elapsed = System.currentTimeMillis() - startTime;",
" if (expectServerUp) {",
" if (elapsed > 60000L) {",
" BaseTestCase.alarm(",
" \"Very slow server startup: \" + elapsed + \" ms\");",
" return true;",
" } else if (elapsed > waitTime) {",
" return true;"
],
"header": "@@ -640,13 +648,15 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" if (expectServerUp)",
" return true;",
" else",
" {",
" if (System.currentTimeMillis() - startTime > waitTime) {",
" return true;"
]
},
{
"added": [
" /**",
" * Set the period before network server times out on start up based on the",
" * in seconds, or use the default.",
" * For example: with DEFAULT_WAIT_TIME set to 240000, i.e. 4 minutes,",
" * <pre>",
" * -Dderby.tests.networkServerStartTimeout=600",
" * </pre>",
" * would extend the timeout to 10 minutes."
],
"header": "@@ -711,14 +721,16 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" /*",
" * set the period before network server times out on start up based on the",
" * in seconds, or use the default",
" * for example: with DEFAULT_WAIT_TIME set to 40000, i.e. 40 seconds,",
" * -Dderby.tests.networkServerStartTimeout=60",
" * would extend the timeout to 1 minute."
]
},
{
"added": [
" BaseTestCase.fail(",
" \"trouble setting WAIT_TIME from passed in property \" +",
" \"derby.tests.networkServerStartTimeout\", e);"
],
"header": "@@ -730,9 +742,9 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" e.printStackTrace();",
" fail(\"trouble setting WAIT_TIME from passed in property \" +",
" \"derby.tests.networkServerStartTimeout\");"
]
}
]
}
] |
derby-DERBY-5648-b60a998f
|
DERBY-5648: Raise an error if NATIVE password maintenance is performed on a missing user.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1300248 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/catalog/SystemProcedures.java",
"hunks": [
{
"added": [
" checkLegalUser( dd, userName );",
" "
],
"header": "@@ -2132,6 +2132,8 @@ public class SystemProcedures {",
"removed": []
},
{
"added": [
"",
" checkLegalUser( dd, userName );"
],
"header": "@@ -2183,6 +2185,8 @@ public class SystemProcedures {",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/error/SQLWarningFactory.java",
"hunks": [
{
"added": [
"\t * @param messageId A Derby messageId as defined in{@link SQLState org.apache.derby.shared.common.reference.SQLState}.",
"\tpublic static SQLWarning newSQLWarning( String messageId )",
" {",
"\t\treturn newSQLWarning(messageId, new Object[] {} );"
],
"header": "@@ -41,14 +41,13 @@ public class SQLWarningFactory {",
"removed": [
"\t * @param messageId",
"\t * A Derby messageId as defined in",
"\t * {@link SQLState org.apache.derby.shared.common.reference.SQLState}.",
"\tpublic static SQLWarning newSQLWarning(String messageId) {",
"\t\treturn newSQLWarning(messageId, null);"
]
}
]
}
] |
derby-DERBY-5649-2f326081
|
DERBY-5649: make improvements to nstest so it's easier to run/analyze/debug
Adjusted the test so it runs better when -Dderby.nstest.backupRestore=false
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1301290 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/system/nstest/utils/MemCheck.java",
"hunks": [
{
"added": [
"import org.apache.derbyTesting.system.nstest.NsTest;",
""
],
"header": "@@ -23,6 +23,8 @@ package org.apache.derbyTesting.system.nstest.utils;",
"removed": []
},
{
"added": [
" ",
"\t\t\t\t// first check if there are still active tester threads, so ",
"\t\t\t\t// we do not make backups on an unchanged db every 10 mins for",
"\t\t\t\t// the remainder of MAX_ITERATIONS.",
"\t\t\t\tif (NsTest.numActiveTestThreads() != 0 && NsTest.numActiveTestThreads() > 1)",
"\t\t\t\t{",
"\t\t\t\t\tcontinue;",
"\t\t\t\t}",
"\t\t\t\telse",
"\t\t\t\t{",
"\t\t\t\t\tSystem.out.println(\"no more test threads, finishing memcheck thread also\");",
"\t\t\t\t\tshowmem();",
"\t\t\t\t\tstopNow=true;",
"\t\t\t\t}"
],
"header": "@@ -48,6 +50,20 @@ public class MemCheck extends Thread {",
"removed": []
}
]
}
] |
derby-DERBY-5649-e276048f
|
DERBY-5649: make improvements to nstest so it's easier to run/analyze/debug
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1300658 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/system/nstest/init/Initializer.java",
"hunks": [
{
"added": [
"\t\t// point, we just need to get a connection to the database"
],
"header": "@@ -49,8 +49,7 @@ public class Initializer {",
"removed": [
"\t\t// point, we just need",
"\t\t// to get a connection to the database"
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/system/nstest/tester/BackupRestoreReEncryptTester.java",
"hunks": [
{
"added": [
" ",
"\t\t// purpose of this client is to work on a large set of data as defined ",
" // by the parameter NUM_HIGH_STRESS_ROWS"
],
"header": "@@ -71,13 +71,12 @@ public class BackupRestoreReEncryptTester extends TesterObject {",
"removed": [
"",
"\t\t// purpose of this client is",
"\t\t// to work on a large set of data as defined by the parameter",
"\t\t// NUM_HIGH_STRESS_ROWS"
]
},
{
"added": [
"\t\t\t// reduce number of deadlocks/lock issues"
],
"header": "@@ -92,8 +91,7 @@ public class BackupRestoreReEncryptTester extends TesterObject {",
"removed": [
"\t\t\t// reduce number of",
"\t\t\t// deadlocks/lock issues"
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/system/nstest/tester/Tester1.java",
"hunks": [
{
"added": [
"\t// The connection to the database is open forever. This client will do ",
"\t// Insert/Update/Delete and simple Select queries over a small to medium ",
"\t// set of data determined randomly over MAX_LOW_STRESS_ROWS rows. ",
"\t// Autocommit is left on else too many deadlocks occur and the goal is to",
"\t// test the data flow and connection management of the network server, not",
"\t// the transaction management of the database."
],
"header": "@@ -45,14 +45,12 @@ public class Tester1 extends TesterObject {",
"removed": [
"\t// The connection to the database is open forever. This client",
"\t// will do Insert/Update/Delete and simple Select queries over",
"\t// a small to medium set of data determined randomly over",
"\t// MAX_LOW_STRESS_ROWS rows. Autocommit is left on else too many deadlocks",
"\t// occur and the",
"\t// goal is to test the data flow and connection management of the network",
"\t// server,",
"\t// not the transaction management of the database."
]
},
{
"added": [
"\t\t// to reduce number of deadlocks",
"\t\t// which we exit the thread.",
"\t\t// connection is only closed outside the loop. Since autocommit is on,",
"\t\t// we make an interation work over MAX_LOW_STRESS_ROWS number of rows.",
"\t\t// This thread could be made to pause (sleep) for a bit between each",
"\t\t// iteration."
],
"header": "@@ -76,19 +74,16 @@ public class Tester1 extends TesterObject {",
"removed": [
"\t\t// to reduce number of",
"\t\t// deadlocks",
"\t\t// which we exit the thread",
"\t\t// connection is only closed",
"\t\t// outside the loop. Since autocommit is on, we make an interation work",
"\t\t// over",
"\t\t// MAX_LOW_STRESS_ROWS number of rows. This thread could be made to",
"\t\t// pause (sleep) for a bit",
"\t\t// between each iteration."
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/system/nstest/tester/TesterObject.java",
"hunks": [
{
"added": [
"\t\t\t\t\t+ \" is closing its connection to the database...\");"
],
"header": "@@ -120,7 +120,7 @@ public class TesterObject {",
"removed": [
"\t\t\t\t\t+ \" is closing it's connection to the database...\");"
]
},
{
"added": [
"\t// rows that we loop through via a result set and perform operations ",
"\t// (getXX calls) in order to ensure that data flows properly. The method ",
"\t// will return the total number of rows selected. Note that we do not touch",
"\t// rows with serialkey less than nstest.NUM_UNTOUCHED_ROWS, and the selects",
"\t// will be based on the parameter passed in, viz numRowsToSelect which is",
"\t// <= nstest.NUM_UNTOUCHED_ROWS"
],
"header": "@@ -190,15 +190,12 @@ public class TesterObject {",
"removed": [
"\t// rows that we",
"\t// loop through via a result set and perform operations (getXX calls) in",
"\t// order to ensure",
"\t// that data flows properly. The method will",
"\t// return the total number of rows selected. Note that we do not touch rows",
"\t// with serialkey",
"\t// less than nstest.NUM_UNTOUCHED_ROWS, and the selects will be based on the",
"\t// parameter passed",
"\t// in, viz numRowsToSelect which is <= nstest.NUM_UNTOUCHED_ROWS"
]
},
{
"added": [
"\t\t // Now work over the returned ResultSet and keep track of number of",
"\t\t // rows returned",
"\t\t // data flow out from the network server is also tested."
],
"header": "@@ -231,11 +228,10 @@ public class TesterObject {",
"removed": [
"\t\t // Now work over the returned ResultSet and keep track of number of rows",
"\t\t // returned",
"\t\t // data flow out",
"\t\t // from the network server is also tested."
]
},
{
"added": [
"\t\t// close the ResultSet and statement and release its resources."
],
"header": "@@ -302,7 +298,7 @@ public class TesterObject {",
"removed": [
"\t\t// close the ResultSet and statement and release it's resources."
]
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/system/nstest/utils/DbUtil.java",
"hunks": [
{
"added": [
"\t\t\t// name for prepared statement, otherwise auto increment column",
"\t\t\t// will think it is trying to update/insert a null value to the",
"\t\t\t// column."
],
"header": "@@ -88,10 +88,9 @@ public class DbUtil {",
"removed": [
"\t\t\t// name",
"\t\t\t// for prepared statement, otherwise auto increment column will",
"\t\t\t// think",
"\t\t\t// it is trying to update/insert a null value to the column."
]
},
{
"added": [],
"header": "@@ -132,9 +131,6 @@ public class DbUtil {",
"removed": [
"\t\t\t// double t_dec = rand.nextDouble() *",
"\t\t\t// Math.pow(10,Math.abs(rand.nextInt()%18));",
"\t\t\t// double t_dec = rand.nextDouble();"
]
},
{
"added": [
"\t\t\t\t\t\t//+ id_ind + NsTest.SUCCESS);",
" + id_ind);",
"\t\t\t\tSystem.out.println(\"FAIL: \" + thread_id + \" inserted \" + rowsAdded + \"rows\");"
],
"header": "@@ -212,13 +208,12 @@ public class DbUtil {",
"removed": [
"\t\t\t\t\t\t+ id_ind + NsTest.SUCCESS);",
"\t\t\t\t",
"\t\t\t\tSystem.out.println(\"FAIL: \" + thread_id + \" insert failed\");",
"\t\t\te.printStackTrace();"
]
},
{
"added": [
"\t\t\td = rand.nextDouble() * Math.pow(10, Math.abs(rand.nextInt() % 6));"
],
"header": "@@ -289,7 +284,7 @@ public class DbUtil {",
"removed": [
"\t\t\td = rand.nextDouble() * Math.pow(10, rand.nextInt() % 18);"
]
},
{
"added": [
"\t\t//System.out.println(thread_id + \" attempting to update col \" + column",
"\t\t//\t\t+ \" to \" + ds2);"
],
"header": "@@ -364,8 +359,8 @@ public class DbUtil {",
"removed": [
"\t\tSystem.out.println(thread_id + \" attempting to update col \" + column",
"\t\t\t\t+ \" to \" + ds2);"
]
},
{
"added": [
"\t\t//System.out.println(thread_id",
"\t\t//\t\t+ \" attempting to delete a row with serialkey = \" + skey);"
],
"header": "@@ -416,8 +411,8 @@ public class DbUtil {",
"removed": [
"\t\tSystem.out.println(thread_id",
"\t\t\t\t+ \" attempting to delete a row with serialkey = \" + skey);"
]
},
{
"added": [
"\t\t\t\t\t//System.out",
"\t\t\t\t\t//.println(getThreadName()",
"\t\t\t\t\t//\t\t+ \" dbutil.pick_one() -> Obtained row from the table \"",
"\t\t\t\t\t//\t\t+ rowToReturn);"
],
"header": "@@ -491,10 +486,10 @@ public class DbUtil {",
"removed": [
"\t\t\t\t\tSystem.out",
"\t\t\t\t\t.println(getThreadName()",
"\t\t\t\t\t\t\t+ \" dbutil.pick_one() -> Obtained row from the table \"",
"\t\t\t\t\t\t\t+ rowToReturn);"
]
}
]
}
] |
derby-DERBY-5657-2489f473
|
DERBY-5657: Split complex error message into 3 shorter messages.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1301064 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/authentication/AuthenticationServiceBase.java",
"hunks": [
{
"added": [
" {",
" throw StandardException.newException( SQLState.PROPERTY_BAD_NATIVE_VALUE );",
" }",
" {",
" throw StandardException.newException( SQLState.PROPERTY_CANT_UNDO_NATIVE );",
" }"
],
"header": "@@ -407,12 +407,16 @@ public abstract class AuthenticationServiceBase",
"removed": [
" { throw badNativeAuthenticationChange(); }",
" { throw badNativeAuthenticationChange(); }"
]
},
{
"added": [
" if ( userCredentials == null )",
" {",
" throw StandardException.newException( SQLState.PROPERTY_DBO_LACKS_CREDENTIALS );",
" }"
],
"header": "@@ -423,7 +427,10 @@ public abstract class AuthenticationServiceBase",
"removed": [
" if ( userCredentials == null ) { throw badNativeAuthenticationChange(); }"
]
}
]
},
{
"file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java",
"hunks": [
{
"added": [
"\tString PROPERTY_BAD_NATIVE_VALUE = \"XCY05.S.1\";",
"\tString PROPERTY_CANT_UNDO_NATIVE = \"XCY05.S.2\";",
"\tString PROPERTY_DBO_LACKS_CREDENTIALS = \"XCY05.S.3\";"
],
"header": "@@ -261,7 +261,9 @@ public interface SQLState {",
"removed": [
"\tString PROPERTY_BAD_NATIVE_CHANGE = \"XCY05.S\";"
]
}
]
}
] |