id
stringlengths
22
25
commit_message
stringlengths
137
6.96k
diffs
listlengths
0
63
derby-DERBY-4-2060c4a6
DERBY-4397 Allow ORDER BY in subqueries Patch derby-4397-2 which implements this functionality and also adds a new test, OrderByInSubqueries. This patch also solves DERBY-4. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@891952 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateViewNode.java", "hunks": [ { "added": [ "\tprivate OrderByList orderByList;" ], "header": "@@ -68,7 +68,7 @@ public class CreateViewNode extends DDLStatementNode", "removed": [ "" ] }, { "added": [ "\t * @param orderCols ORDER BY list" ], "header": "@@ -80,6 +80,7 @@ public class CreateViewNode extends DDLStatementNode", "removed": [] }, { "added": [ "\t\t\t\t Object qeText,", "\t\t\t\t Object orderCols)" ], "header": "@@ -88,7 +89,8 @@ public class CreateViewNode extends DDLStatementNode", "removed": [ "\t\t\t\t Object qeText)" ] }, { "added": [ "\t\tthis.orderByList = (OrderByList)orderCols;" ], "header": "@@ -96,6 +98,7 @@ public class CreateViewNode extends DDLStatementNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/DMLModStatementNode.java", "hunks": [ { "added": [ "\tstatic void\tbindRowScopedExpression" ], "header": "@@ -677,7 +677,7 @@ abstract class DMLModStatementNode extends DMLStatementNode", "removed": [ "\tvoid\tbindRowScopedExpression" ] }, { "added": [ "\t/**", "\t * Prints the sub-nodes of this object. 
See QueryTreeNode.java for", "\t * how tree printing is supposed to work.", "\t *", "\t * @param depth\t\tThe depth of this node in the tree", "\t */", "", "\tpublic void printSubNodes(int depth)", "\t{", "\t\tif (SanityManager.DEBUG)", "\t\t{", "\t\t\tsuper.printSubNodes(depth);", "", "\t\t\tprintLabel(depth, \"targetTableName: \");", "\t\t\ttargetTableName.treePrint(depth + 1);", "", "\t\t\tif (resultColumnList != null)", "\t\t\t{", "\t\t\t\tprintLabel(depth, \"resultColumnList: \");", "\t\t\t\tresultColumnList.treePrint(depth + 1);", "\t\t\t}", "\t\t}", "\t}", "" ], "header": "@@ -1914,6 +1914,30 @@ abstract class DMLModStatementNode extends DMLStatementNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/FromBaseTable.java", "hunks": [ { "added": [ "", "", "\t\t\t\tOrderByList orderByList = cvn.getOrderByList();", "" ], "header": "@@ -2237,12 +2237,14 @@ public class FromBaseTable extends FromTable", "removed": [ "\t", "\t" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/FromSubquery.java", "hunks": [ { "added": [ "\tprivate OrderByList orderByList;" ], "header": "@@ -46,6 +46,7 @@ import org.apache.derby.iapi.util.JBitSet;", "removed": [] }, { "added": [ "\t * @param orderByList ORDER BY list if any, or null", "\t\t\t\t\tObject orderByList,", "\t\tthis.orderByList = (OrderByList)orderByList;" ], "header": "@@ -57,18 +58,21 @@ public class FromSubquery extends FromTable", "removed": [] }, { "added": [ "", "\t\t\tif (orderByList != null)", "\t\t\t{", "\t\t\t\tprintLabel(depth, \"orderByList: \");", "\t\t\t\torderByList.treePrint(depth + 1);", "\t\t\t}" ], "header": "@@ -89,6 +93,12 @@ public class FromSubquery extends FromTable", "removed": [] }, { "added": [ "\t\tif (orderByList != null) {", "\t\t\torderByList.pullUpOrderByColumns(subquery);", "\t\t}", "" ], "header": "@@ -200,7 +210,10 @@ public class FromSubquery extends FromTable", "removed": [ "\t\t" ] }, { "added": [ "\t\tif (orderByList != null) {", 
"\t\t\torderByList.bindOrderByColumns(subquery);", "\t\t}", "" ], "header": "@@ -219,6 +232,10 @@ public class FromSubquery extends FromTable", "removed": [] }, { "added": [ "\t\t// Push the order by list down to the ResultSet", "\t\tif (orderByList != null)", "\t\t{", "\t\t\t// If we have more than 1 ORDERBY columns, we may be able to", "\t\t\t// remove duplicate columns, e.g., \"ORDER BY 1, 1, 2\".", "\t\t\tif (orderByList.size() > 1)", "\t\t\t{", "\t\t\t\torderByList.removeDupColumns();", "\t\t\t}", "", "\t\t\tsubquery.pushOrderByList(orderByList);", "\t\t\torderByList = null;", "\t\t}", "", "\t\t * Another complication is that we want to be able to only push" ], "header": "@@ -325,13 +342,27 @@ public class FromSubquery extends FromTable", "removed": [ "\t\t * Another complication is that we want to be able to only only push" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/InsertNode.java", "hunks": [ { "added": [ "\tprivate OrderByList orderByList;" ], "header": "@@ -110,6 +110,7 @@ public final class InsertNode extends DMLModStatementNode", "removed": [] }, { "added": [ " * @param orderByList The order by list for the source result set, null if", "\t *\t\t\tno order by list", "\t\t\tObject targetProperties,", " Object orderByList)" ], "header": "@@ -124,13 +125,16 @@ public final class InsertNode extends DMLModStatementNode", "removed": [ "\t\t\tObject targetProperties)" ] }, { "added": [ "\t\tthis.orderByList = (OrderByList) orderByList;" ], "header": "@@ -144,6 +148,7 @@ public final class InsertNode extends DMLModStatementNode", "removed": [] }, { "added": [ "\t\t\tif (orderByList != null) {", "\t\t\t\tprintLabel(depth, \"orderByList: \");", "\t\t\t\torderByList.treePrint(depth + 1);", "\t\t\t}", "" ], "header": "@@ -206,6 +211,11 @@ public final class InsertNode extends DMLModStatementNode", "removed": [] }, { "added": [ "\t\t// Bind the ORDER BY columns", "\t\tif (orderByList != null)", "\t\t{", 
"\t\t\torderByList.pullUpOrderByColumns(resultSet);", "", "\t\t\t// The select list may have new columns now, make sure to bind", "\t\t\t// those.", "\t\t\tsuper.bindExpressions();", "", "\t\t\torderByList.bindOrderByColumns(resultSet);", "\t\t}", "" ], "header": "@@ -421,6 +431,18 @@ public final class InsertNode extends DMLModStatementNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/OrderByList.java", "hunks": [ { "added": [ "\tprivate int resultSetNumber = -1;" ], "header": "@@ -76,6 +76,7 @@ public class OrderByList extends OrderedColumnList", "removed": [] }, { "added": [ "\t *\tBind the update columns by their names to the target resultset of the", "\t * cursor specification. This variant is used by InsertNode.", "\t *", "\t * \t@param target\tThe underlying result set", "\t *\t@exception StandardException\t\tThrown on error" ], "header": "@@ -112,12 +113,11 @@ public class OrderByList extends OrderedColumnList", "removed": [ "\t\tBind the update columns by their names to the target resultset", "\t\tof the cursor specification.", "", "\t\t@param target\tThe underlying result set", "\t", "\t\t@exception StandardException\t\tThrown on error" ] }, { "added": [ "" ], "header": "@@ -149,7 +149,7 @@ public class OrderByList extends OrderedColumnList", "removed": [ "\t" ] }, { "added": [ "\t\tresultSetNumber = cc.getNextResultSetNumber();" ], "header": "@@ -387,7 +387,7 @@ public class OrderByList extends OrderedColumnList", "removed": [ "\t\tint resultSetNumber = cc.getNextResultSetNumber();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/QueryTreeNode.java", "hunks": [ { "added": [ "\t/**", "\t * Print call stack for debug purposes", "\t */", "", "\tpublic void stackPrint()", "\t{", "\t\tif (SanityManager.DEBUG)", "\t\t{", "\t\t\tdebugPrint(\"Stacktrace:\\n\");", "\t\t\tException e = new Exception(\"dummy\");", " StackTraceElement[] st= e.getStackTrace();", " for (int i=0; i<st.length; i++) {", " 
debugPrint(st[i] + \"\\n\");", " }", "", "\t\t\tdebugFlush();", "\t\t}", "\t}", "" ], "header": "@@ -342,6 +342,25 @@ public abstract class QueryTreeNode implements Visitable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumnList.java", "hunks": [ { "added": [ "\t\t\t if ((! countMismatchAllowed) && visibleSize() != nameList.size())", "\t\t\t\t\t\"The size of the 2 lists is expected to be the same. \" +", "\t\t\t\t\t\"visibleSize() = \" + visibleSize() +", "\t\t\t\t\t\", nameList.size() = \" + nameList.size());", "\t\tint size = (countMismatchAllowed) ? nameList.size() : visibleSize();" ], "header": "@@ -646,15 +646,16 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ "\t\t\t if ((! countMismatchAllowed) && size() != nameList.size())", "\t\t\t\t\t\"The size of the 2 lists is expected to be the same. size() = \" +", "\t\t\t\t\tsize() + \", nameList.size() = \" + nameList.size());", "\t\tint size = (countMismatchAllowed) ? nameList.size() : size();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultSetNode.java", "hunks": [ { "added": [ "\t\tif (!inOrder ||", " resultColumns.visibleSize() < target.resultColumnList.size()) {" ], "header": "@@ -935,7 +935,8 @@ public abstract class ResultSetNode extends QueryTreeNode", "removed": [ "\t\tif (!inOrder || resultColumns.size() < target.resultColumnList.size()) {" ] }, { "added": [ "\tprivate ResultColumn genNewRCForInsert(TableDescriptor targetTD," ], "header": "@@ -1001,7 +1002,7 @@ public abstract class ResultSetNode extends QueryTreeNode", "removed": [ "\tResultColumn genNewRCForInsert(TableDescriptor targetTD," ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SelectNode.java", "hunks": [ { "added": [ "\t * o It does not have an ORDER BY clause (pushed from FromSubquery)." 
], "header": "@@ -1342,6 +1342,7 @@ public class SelectNode extends ResultSetNode", "removed": [] }, { "added": [ "\t\t/* Don't flatten if selectNode now has an order by */", "\t\tif ((orderByList != null) &&", "\t\t\t (orderByList.size() > 0))", "\t\t{", "\t\t\treturn false;", "\t\t}", "" ], "header": "@@ -1385,6 +1386,13 @@ public class SelectNode extends ResultSetNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SubqueryNode.java", "hunks": [ { "added": [ "\tprivate OrderByList orderByList;", "" ], "header": "@@ -166,6 +166,8 @@ public class SubqueryNode extends ValueNode", "removed": [] }, { "added": [ "\t * @param orderCols ORDER BY list", "\t\t\t\t\t\t\tObject leftOperand,", "\t\t\t\t\t Object orderCols)", "\t\tthis.orderByList = (OrderByList)orderCols;" ], "header": "@@ -201,15 +203,18 @@ public class SubqueryNode extends ValueNode", "removed": [ "\t\t\t\t\t\t\tObject leftOperand)" ] }, { "added": [ "", "\t\t\tif (orderByList != null)", "\t\t\t{", "\t\t\t\tprintLabel(depth, \"orderByList: \");", "\t\t\t\torderByList.treePrint(depth + 1);", "\t\t\t}" ], "header": "@@ -269,6 +274,12 @@ public class SubqueryNode extends ValueNode", "removed": [] }, { "added": [ "\t\tif (orderByList != null) {", "\t\t\torderByList.pullUpOrderByColumns(resultSet);", "\t\t}", "", "\t\tif (orderByList != null) {", "\t\t\torderByList.bindOrderByColumns(resultSet);", "\t\t}", "" ], "header": "@@ -504,11 +515,19 @@ public class SubqueryNode extends ValueNode", "removed": [] }, { "added": [ "\t\t\t orderByList == null &&" ], "header": "@@ -625,6 +644,7 @@ public class SubqueryNode extends ValueNode", "removed": [] }, { "added": [ "\t\t\t orderByList == null &&" ], "header": "@@ -694,6 +714,7 @@ public class SubqueryNode extends ValueNode", "removed": [] }, { "added": [ "\t\t// Push the order by list down to the ResultSet", "\t\tif (orderByList != null) {", "\t\t\t// If we have more than 1 ORDERBY columns, we may be able to", "\t\t\t// remove duplicate 
columns, e.g., \"ORDER BY 1, 1, 2\".", "\t\t\tif (orderByList.size() > 1)", "\t\t\t{", "\t\t\t\torderByList.removeDupColumns();", "\t\t\t}", "", "\t\t\tresultSet.pushOrderByList(orderByList);", "\t\t\torderByList = null;", "\t\t}", "", "" ], "header": "@@ -788,6 +809,20 @@ public class SubqueryNode extends ValueNode", "removed": [] } ] } ]
derby-DERBY-400-66125641
DERBY-400: Fixed the SQLState for some of the new client messages to better match what is required by SQL2003 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@372240 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java", "hunks": [ { "added": [ " String NOT_IMPLEMENTED = \"0A000.S\";", " String JDBC2_METHOD_NOT_IMPLEMENTED = \"0A002.S\";", " String JDBC3_METHOD_NOT_SUPPORTED = \"0A003.S\";" ], "header": "@@ -1329,7 +1329,9 @@ public interface SQLState {", "removed": [ " String NOT_IMPLEMENTED = \"0A000.S\";" ] }, { "added": [], "header": "@@ -1437,8 +1439,6 @@ public interface SQLState {", "removed": [ " String LOGIN_FAILED = \"08004\";", " String NO_CURRENT_CONNECTION = \"08003\";" ] }, { "added": [ " ", " // Connection exceptions - SQL State class 08", " //following are database severity", " String NO_CURRENT_CONNECTION = \"08003\";", " String LOGIN_FAILED = \"08004\";", " // There can be multiple causes for 08003, which according", " // to SQL2003 spec means \"connection does not exist\"", " // We use a suffix to distinguish them. Because of the suffix", " // you *must* add a severity code", " String NOGETCONN_ON_CLOSED_POOLED_CONNECTION = \"08003.C.1\";", " // system severity", " String SHUTDOWN_DATABASE = \"08006.D\"; ", " " ], "header": "@@ -1450,7 +1450,19 @@ public interface SQLState {", "removed": [ "" ] }, { "added": [], "header": "@@ -1471,8 +1483,6 @@ public interface SQLState {", "removed": [ " //following are database severity", " String SHUTDOWN_DATABASE = \"08006.D\";" ] }, { "added": [], "header": "@@ -1488,13 +1498,10 @@ public interface SQLState {", "removed": [ " String NOGETCONN_ON_CLOSED_POOLED_CONNECTION = \"XN001.C\";", " String JDBC2_METHOD_NOT_IMPLEMENTED = \"XN007.S\";", " String JDBC3_METHOD_NOT_SUPPORTED = \"XN008.S\";" ] } ] } ]
derby-DERBY-400-e4b24389
DERBY-400: Fixed the SQL states for warnings so they are compliant with SQL2003 -- can't use SQL states that are reserved for standard SQL States. Also refactored the two messages for JDBC2 not supported and JDBC3 not supported into a single message. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@372368 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/CallableStatement.java", "hunks": [ { "added": [ " new MessageId(SQLState.JDBC_METHOD_NOT_IMPLEMENTED));" ], "header": "@@ -915,7 +915,7 @@ public class CallableStatement extends PreparedStatement", "removed": [ " new MessageId(SQLState.JDBC2_METHOD_NOT_IMPLEMENTED));" ] }, { "added": [ " new MessageId(SQLState.JDBC_METHOD_NOT_IMPLEMENTED));" ], "header": "@@ -947,7 +947,7 @@ public class CallableStatement extends PreparedStatement", "removed": [ " new MessageId(SQLState.JDBC2_METHOD_NOT_IMPLEMENTED));" ] }, { "added": [ " new MessageId(SQLState.JDBC_METHOD_NOT_IMPLEMENTED));" ], "header": "@@ -1007,7 +1007,7 @@ public class CallableStatement extends PreparedStatement", "removed": [ " new MessageId(SQLState.JDBC2_METHOD_NOT_IMPLEMENTED));" ] }, { "added": [ " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();" ], "header": "@@ -1027,112 +1027,112 @@ public class CallableStatement extends PreparedStatement", "removed": [ " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw 
jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();" ] }, { "added": [ " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();", " throw jdbcMethodNotImplemented();" ], "header": "@@ -1145,245 +1145,245 @@ public class CallableStatement extends PreparedStatement", "removed": [ " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw 
jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();" ] }, { "added": [ " private SQLException jdbcMethodNotImplemented() throws SQLException" ], "header": "@@ -1428,7 +1428,7 @@ public class CallableStatement extends PreparedStatement", "removed": [ " private SQLException jdbc3MethodNotSupported() throws SQLException" ] } ] }, { "file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java", "hunks": [ { "added": [ " String JDBC_METHOD_NOT_IMPLEMENTED = \"0AX01.S\";" ], "header": "@@ -1330,8 +1330,7 @@ public interface SQLState {", "removed": [ " String JDBC2_METHOD_NOT_IMPLEMENTED = \"0A002.S\";", " String JDBC3_METHOD_NOT_SUPPORTED = \"0A003.S\";" ] } ] } ]
derby-DERBY-4001-7e23da13
DERBY-4001: Sequence comparison with "ALL" does not yield correct results There are three essential changes in the patch: 1) ProjectRestrictNode.pullOptPredicates() Don't pull any predicates if the from table is marked as a not exists table. This way the flattening of queries like the ones below will work, because the predicate 1<>1 is not pulled out and applied on the outer table. SELECT * FROM T WHERE NOT EXISTS (SELECT * FROM T WHERE 1<>1) SELECT * FROM T WHERE X < ALL (SELECT X FROM T WHERE 1<>1) SELECT * FROM T WHERE X NOT IN (SELECT X FROM T WHERE 1<>1) 2) SubqueryNode.preprocess() Don't allow not exists flattening unless all the predicates in the subquery reference the base table of the inner query. When all the predicates reference that table, none of them will be used in the outer query, so they won't cause any trouble for the flattening. This makes queries like the one below work: SELECT * FROM T T1 WHERE NOT EXISTS (SELECT * FROM T T2 WHERE T1.X > 100) No flattening will happen in this case, though. Although it may sound like (2) would prevent the example queries in (1) from being flattened, that's not the case. This is because simple predicates like 1<>1 are pushed down before SubqueryNode.preprocess() gets to the flattening, so it doesn't see those predicates. The flattening is still safe, since we have made sure that those predicates won't be pulled out again. 3) SubqueryNode.preprocess() If an ALL subquery or a NOT IN subquery is flattened, a new join condition is created, for instance WHERE X < ALL (SELECT Y ...) results in the join condition X >= Y and WHERE X NOT IN (SELECT Y ...) results in the join condition X = Y The patch adds a check so that the flattening only happens if the right side of the join condition references the base table of the subquery. If it does, we know that the join condition cannot be used to filter rows from the outer table, so it's safe to do the flattening. 
This prevents queries like the ones below from being flattened, and they now work as expected: SELECT * FROM T WHERE X < ALL (SELECT 100 FROM T) SELECT * FROM T T1 WHERE X = ALL (SELECT T1.X FROM T) SELECT * FROM T WHERE X NOT IN (SELECT 100 FROM T) SELECT * FROM T T1 WHERE X NOT IN (SELECT T1.X+100 FROM T) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@769273 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/SubqueryNode.java", "hunks": [ { "added": [ "\t\t\treturn getNewJoinCondition(leftOperand, getRightOperand());" ], "header": "@@ -668,10 +668,7 @@ public class SubqueryNode extends ValueNode", "removed": [ "\t\t\tValueNode rightOperand;", "\t\t\trightOperand = ((ResultColumn) rrsn.getResultColumns().elementAt(0)).", "\t\t\t\t\t\t\t\tgetExpression();", "\t\t\treturn getNewJoinCondition(leftOperand, rightOperand);" ] }, { "added": [ " * DERBY-4001: Extra conditions to allow flattening to a NOT", " * EXISTS join (in a NOT EXISTS join it does matter on which", " * side of the join predicates/restrictions are applied):", " * o All the predicates must reference the FBT, otherwise", " * predicates meant for the right side of the join may be", " * applied to the left side of the join.", " * o The right operand (in ALL and NOT IN) must reference the", " * FBT, otherwise the generated join condition may be used", " * to restrict the left side of the join.", "\t\t\t\t\t\t select.getWherePredicates().allPushable())", " FromBaseTable fbt =", " singleFromBaseTable(select.getFromList());", "", " if (fbt != null && (!flattenableNotExists ||", " (select.getWherePredicates().allReference(fbt) &&", " rightOperandFlattenableToNotExists(numTables, fbt))))", " {", " return flattenToExistsJoin(numTables,", " outerFromList, outerSubqueryList,", " outerPredicateList, flattenableNotExists);", " }" ], "header": "@@ -758,16 +755,32 @@ public class SubqueryNode extends ValueNode", "removed": [ "\t\t\t\t\t\t select.getWherePredicates().allPushable() &&", "\t\t\t\t\t\t singleFromBaseTable(select.getFromList()))", "\t\t\t\t\treturn flattenToExistsJoin(numTables,", "\t\t\t\t\t\t\t\t\t\t outerFromList, outerSubqueryList,", "\t\t\t\t\t\t\t\t\t\t outerPredicateList, flattenableNotExists);" ] }, { "added": [ " * @return the {@code FromBaseTable} if the from list from the subquery", " * contains a single entry which is a FBT or a PRN/FBT, or 
{@code null}", " * if the subquery does not contain a single FBT", "\tprivate FromBaseTable singleFromBaseTable(FromList fromList)", " FromBaseTable fbt = null;", "", " if (fromList.size() == 1) {", " FromTable ft = (FromTable) fromList.elementAt(0);", " if (ft instanceof FromBaseTable) {", " fbt = (FromBaseTable) ft;", " } else if (ft instanceof ProjectRestrictNode) {", " ResultSetNode child =", " ((ProjectRestrictNode) ft).getChildResult();", " if (child instanceof FromBaseTable) {", " fbt = (FromBaseTable) child;", " }", " }", " }", "", " return fbt;", " /**", " * <p>", " * Check if the right operand is on a form that makes it possible to", " * flatten this query to a NOT EXISTS join. We don't allow flattening if", " * the right operand doesn't reference the base table of the subquery.", " * (Requirement added as part of DERBY-4001.)", " * </p>", " *", " * <p>", " * The problem with the right operand not referencing the base table of the", " * subquery, is that the join condition may then be used to filter rows", " * from the right side (outer) table in the NOT EXISTS join. In a NOT", " * EXISTS join, the join condition can only safely be applied to the", " * left side (inner) table of the join. Otherwise, it will filter out all", " * the interesting rows too early.", " * </p>", " *", " * <p>Take the query below as an example:</p>", " *", " * <pre><code>", " * SELECT * FROM T1 WHERE X NOT IN (SELECT 1 FROM T2)", " * </code></pre>", " *", " * <p>", " * Here, the right operand is 1, and the join condition is {@code T1.X=1}.", " * If flattened, the join condition will be used directly on the outer", " * table, and hide all rows with {@code X<>1}, although those are the only", " * rows we're interested in. 
If the join condition had only been used on", " * the inner table, the NOT EXISTS join logic would do the correct thing.", " * </p>", " *", " * <p>", " * If the join condition references the inner table, the condition cannot", " * be used directly on the outer table, so it is safe to flatten the query.", " * </p>", " *", " * @param numTables the number of tables in this statement", " * @param fbt the only {@code FromBaseTable} in this subquery", " * @return {@code true} if it is OK to flatten this query to a NOT EXISTS", " * join, {@code false} otherwise", " */", " private boolean rightOperandFlattenableToNotExists(", " int numTables, FromBaseTable fbt) throws StandardException {", "", " boolean flattenable = true;", "", " // If there is no left operand, there is no right operand. If there is", " // no right operand, it cannot cause any problems for the flattening.", " if (leftOperand != null) {", " JBitSet tableSet = new JBitSet(numTables);", " getRightOperand().categorize(tableSet, false);", " // The query can be flattened to NOT EXISTS join only if the right", " // operand references the base table.", " flattenable = tableSet.get(fbt.getTableNumber());", " }", "", " return flattenable;", " }", "" ], "header": "@@ -830,31 +843,89 @@ public class SubqueryNode extends ValueNode", "removed": [ "\t * @return Whether or not the from list from the subquery contains a", "\t *\t\t\tsingle entry which is a FBT or a PRN/FBT.", "\tprivate boolean singleFromBaseTable(FromList fromList)", "\t\tboolean retCode = (fromList.size() == 1);", "", "\t\tif (retCode)", "\t\t{", "\t\t\tFromTable ft = (FromTable) fromList.elementAt(0);", "", "\t\t\tif (((ft instanceof ProjectRestrictNode) &&", "\t\t\t\t ((ProjectRestrictNode) ft).getChildResult() instanceof FromBaseTable) ||", "\t\t\t\tft instanceof FromBaseTable)", "\t\t\t{", "\t\t\t}", "\t\t\telse", "\t\t\t{", "\t\t\t\tretCode = false;", "\t\t\t}", "\t\t}", "", "\t\treturn retCode;" ] }, { "added": [ "\t\t\t\t\t\t! 
getRightOperand().getTypeServices().isNullable());" ], "header": "@@ -866,10 +937,8 @@ public class SubqueryNode extends ValueNode", "removed": [ "\t\t\tValueNode rightOperand = ((ResultColumn) resultSet.getResultColumns().elementAt(0)).", "\t\t\t\t\t\t\t\t\tgetExpression();", "\t\t\t\t\t\t! rightOperand.getTypeServices().isNullable());" ] }, { "added": [ "\t\t\tValueNode rightOperand = getRightOperand();" ], "header": "@@ -965,9 +1034,7 @@ public class SubqueryNode extends ValueNode", "removed": [ "\t\t\tValueNode rightOperand;", "\t\t\trightOperand = ((ResultColumn) select.getResultColumns().elementAt(0)).", "\t\t\t\t\t\t\t\tgetExpression();" ] } ] } ]
derby-DERBY-4018-c6fc81ea
DERBY-4018: ArrayIndexOutOfBoundsException in TopService.inService under heavy multithreaded use of EmbeddedDriver Closed another unsynchronized window between Vector.size() and Vector.elementAt(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@790218 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/monitor/TopService.java", "hunks": [ { "added": [ " // DERBY-4018: Need to hold the synchronization over the entire loop", " // to prevent concurrent modifications from causing an", " // ArrayIndexOutOfBoundsException." ], "header": "@@ -229,6 +229,9 @@ final class TopService {", "removed": [] }, { "added": [ " for (int i = 0;; i++) {", " final ModuleInstance module;", "", " // DERBY-4018: Synchronized block in order to close the window", " // between size() and elementAt() where the size may change", " // and result in an ArrayIndexOutOfBoundsException.", " synchronized (moduleInstances) {", " if (i < moduleInstances.size()) {", " module = (ModuleInstance) moduleInstances.elementAt(i);", " } else {", " // No more instances to look at, break out of the loop.", " break;", " }", " }" ], "header": "@@ -269,8 +272,20 @@ final class TopService {", "removed": [ "\t\t\tfor (int i = 0; i < moduleInstances.size(); i++) {", "\t\t\t\tModuleInstance module = (ModuleInstance) moduleInstances.elementAt(i);" ] } ] } ]
derby-DERBY-4018-f0e75f3f
DERBY-4018: ArrayIndexOutOfBoundsException in TopService.inService under heavy multithreaded use of EmbeddedDriver Factored out two for loops and surrounded them with synchronization on the vector they were iterating over. This prevent others from changing the size of the vector between the calls to Vector.size() and Vector.get(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@789264 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/monitor/TopService.java", "hunks": [ { "added": [ " ModuleInstance module = findModuleInstance(instance);", " if (module != null) {", " topModule = module;", " notifyAll();", " }" ], "header": "@@ -102,14 +102,11 @@ final class TopService {", "removed": [ "\t\t\tfor (int i = 0; i < moduleInstances.size(); i++) {", "\t\t\t\tModuleInstance module = (ModuleInstance) moduleInstances.elementAt(i);", "\t\t\t\tif (module.getInstance() == instance) {", "\t\t\t\t\ttopModule = module;", "\t\t\t\t\tnotifyAll();", "\t\t\t\t\tbreak;", "\t\t\t\t}", "\t\t\t}" ] }, { "added": [ " /**", " * Find a {@code ModuleInstance} object whose {@code getInstance()} method", " * returns the object specified by the {@code instance} parameter.", " *", " * @param instance the instance to look for", " * @return a {@code ModuleInstance} object, or {@code null} if no match", " * was found", " */", " private ModuleInstance findModuleInstance(Object instance) {", " synchronized (moduleInstances) {", " for (int i = 0; i < moduleInstances.size(); i++) {", " ModuleInstance module = (ModuleInstance) moduleInstances.get(i);", " if (module.getInstance() == instance) {", " return module;", " }", " }", " }", " return null;", " }", "" ], "header": "@@ -223,6 +220,26 @@ final class TopService {", "removed": [] }, { "added": [ " return findModuleInstance(instance) != null;" ], "header": "@@ -394,14 +411,7 @@ final class TopService {", "removed": [ "", "\t\tfor (int i = 0; i < moduleInstances.size(); i++) {", "", "\t\t\tModuleInstance mi = (ModuleInstance) moduleInstances.elementAt(i);", "\t\t\tif (mi.getInstance() == instance)", "\t\t\t\treturn true;", "\t\t}", "\t\treturn false;" ] } ] } ]
derby-DERBY-4027-8072a568
DERBY-4027: An attempt was made to access an out of range slot on a page When a new value is inserted into an index backing a nullable unique constraint, a check is performed to verify that the adjacent slots do not contain the same value as the one being inserted. This extra check is needed because the index backing such a constraint is not unique (it allows multiple NULL values). If the spot on which the new value is inserted is at the beginning or the end of the index page, the last value of the previous page or the first value of the next page is checked. Currently, the code attempts to read that value right after the slot pointer has been moved to that page. This was the cause of the bug, as that page may be empty, and any pointer to a slot on an empty page is pointing to a non-existent slot, hence the out-of-range-slot error. This patch fixes the bug by checking again after moving to another page that we are attempting to read a valid row. If not, we skip to the next (or previous, depending on the direction of the traversal) index page in the chain and look for a duplicate there. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@744984 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/btree/BTreeController.java", "hunks": [ { "added": [ " // DERBY-4027: We have moved to the previous page and need", " // to recheck that the slot number is valid (it won't be", " // if the page we moved to is empty). Restart from the top", " // of the loop body to get the slot number rechecked.", " continue;" ], "header": "@@ -393,6 +393,11 @@ public class BTreeController extends OpenBTree implements ConglomerateController", "removed": [] } ] } ]
derby-DERBY-4028-4d5163b2
DERBY-4028: two rows can be inserted with the same value in a column that a unique constraint on that column should prevent If the slot next to the one where a row is inserted contains a deleted duplicate, move past it and see if there's another non-deleted duplicate hidden behind it. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@752826 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/btree/BTreeController.java", "hunks": [ { "added": [ "", " // If we found a deleted row, we don't know whether there", " // is a duplicate, so we need to continue the search.", " final boolean continueSearch =", " (ret == MATCH_FOUND && leaf.page.isDeletedAtSlot(slot));", "", " if (!continueSearch) {", " if (newLeaf) {", " // Since we have moved away from the original leaf,", " // we need some logic to make sure we don't hold", " // latches that we're not supposed to hold.", " if (ret == RESCAN_REQUIRED) {", " // When a rescan is required, we must release the", " // original leaf, since the callers expect all", " // latches to have been released (and so they", " // should have been, so this is probably a bug -", " // see DERBY-4080).", " originalLeaf.release();", " }", " if (ret != RESCAN_REQUIRED) {", " // Since a rescan is not required, we still hold", " // the latch on the non-original leaf. No other", " // leaves than the original one should be latched", " // when we return, so release the current leaf.", " leaf.release();", " }", " }", " return ret;", " slot--;" ], "header": "@@ -405,16 +405,37 @@ public class BTreeController extends OpenBTree implements ConglomerateController", "removed": [ " //release the page if required", " if (ret == RESCAN_REQUIRED && newLeaf) {", " originalLeaf.release();", " }", " if (ret != RESCAN_REQUIRED && newLeaf) {", " leaf.release();", " return ret;", " slot++;" ] }, { "added": [ "", " // If we found a deleted row, we don't know whether there", " // is a duplicate, so we need to continue the search.", " final boolean continueSearch =", " (ret == MATCH_FOUND && leaf.page.isDeletedAtSlot(slot));", "", " if (!continueSearch) {", " if (newLeaf) {", " // Since we have moved away from the original leaf,", " // we need some logic to make sure we don't hold", " // latches that we're not supposed to hold.", " if (ret == RESCAN_REQUIRED) {", " // When a rescan is required, we 
must release the", " // original leaf, since the callers expect all", " // latches to have been released (and so they", " // should have been, so this is probably a bug -", " // see DERBY-4080).", " originalLeaf.release();", " }", " if (ret != RESCAN_REQUIRED) {", " // Since a rescan is not required, we still hold", " // the latch on the non-original leaf. No other", " // leaves than the original one should be latched", " // when we return, so release the current leaf.", " leaf.release();", " }", " }", " return ret;" ], "header": "@@ -465,13 +486,35 @@ public class BTreeController extends OpenBTree implements ConglomerateController", "removed": [ " if (ret == RESCAN_REQUIRED && newLeaf) {", " originalLeaf.release();", " }", " if (ret != RESCAN_REQUIRED && newLeaf) {", " leaf.release();", " return ret;" ] }, { "added": [ " * Compares two rows for insert. If the two rows are not equal,", " * {@link #NO_MATCH} is returned. Otherwise, it tries to get a lock on", " * the row in the tree. If the lock is obtained without waiting,", " * {@link #MATCH_FOUND} is returned (even if the row has been deleted).", " * Otherwise, {@link #RESCAN_REQUIRED} is returned to indicate that the", " * latches have been released and the B-tree must be rescanned.", " *", " * If {@code MATCH_FOUND} is returned, the caller should check whether", " * the row has been deleted. 
If so, it may have to move to check the", " * adjacent rows to be sure that there is no non-deleted duplicate row.", " *", " * If {@code MATCH_FOUND} or {@code RESCAN_REQUIRED} is returned, the", " * transaction will hold an update lock on the specified record when", " * the method returns.", " *", " * <b>Note!</b> This method should only be called when the index is almost", " * unique (that is, a non-unique index backing a unique constraint).", " *", " * @return {@code NO_MATCH} if no duplicate is found,", " * {@code MATCH_FOUND} if a duplicate is found, or", " * {@code RESCAN_REQUIRED} if the B-tree must be rescanned" ], "header": "@@ -479,22 +522,31 @@ public class BTreeController extends OpenBTree implements ConglomerateController", "removed": [ " * Compares two row for insert. If the two rows are equal it checks if the ", " * row in tree is deleted. If not MATCH_FOUND is returned. If the row is ", " * deleted it tries to get a lock on that. If a lock is obtained without ", " * waiting (ie without losing the latch) the row was deleted within the ", " * same transaction and its safe to insert. NO_MATCH is returned in this ", " * case. If latch is released while waiting for lock rescaning the tree ", " * is required as the tree might have been rearanged by some other ", " * transaction. RESCAN_REQUIRED is returned in this case.", " * In case of NO_MATCH and MATCH_FOUND latch is also released.", " * @return 0 if no duplicate", " * 1 if duplicate ", " * 2 if rescan required" ] } ] } ]
derby-DERBY-4037-d43d4c3e
DERBY-4037 updated test to wait longer if expected background tasks have not cleaned up expected pages yet. Fast machines should see no slowdown in the test. Slower machines will wait up to 100 seconds before reporting failure after checking iteratively for result every 1 second. Also enhanced printout to identify which test is failing between the 2 possibilities. /mikem git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@803694 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4038-1a363021
DERBY-4038; convert access.sql to junit to avoid an intermittent error on IBM zseries machines when the test is run in the encryption suite. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@897938 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/suites/EncryptionSuite.java", "hunks": [ { "added": [ "import java.sql.Connection;", "import java.sql.Statement;", "import java.util.Properties;", "import org.apache.derbyTesting.functionTests.tests.store.AccessTest;" ], "header": "@@ -20,8 +20,12 @@", "removed": [] }, { "added": [ " Properties sysProps = new Properties();", " sysProps.put(\"derby.optimizer.optimizeJoinOrder\", \"false\");", " sysProps.put(\"derby.optimizer.ruleBasedOptimization\", \"true\");", " sysProps.put(\"derby.optimizer.noTimeout\", \"true\");", " ", " suite.addTestSuite(AccessTest.class);", " ", " protected void setUp() {", " ", " try { ", " Connection conn = getConnection();", " Statement s = createStatement();", "", " s.execute(\"CREATE FUNCTION PADSTRING (DATA VARCHAR(32000), \"", " + \"LENGTH INTEGER) RETURNS VARCHAR(32000) EXTERNAL NAME \" +", " \"'org.apache.derbyTesting.functionTests.util.Formatters\" +", " \".padString' LANGUAGE JAVA PARAMETER STYLE JAVA\");", " s.close();", " conn.close();", "", " } catch (SQLException se) {", " // ignore", " }", " }", " ", " public void tearDown() throws Exception {", " Statement st = createStatement();", " super.tearDown();", " try {", " st.executeUpdate(\"DROP FUNCTION PADSTRING\");", " } catch (SQLException e) {", " // never mind.", " }", " }", " " ], "header": "@@ -98,9 +102,44 @@ public final class EncryptionSuite extends BaseJDBCTestCase {", "removed": [] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/RuntimeStatisticsParser.java", "hunks": [ { "added": [ " private String [] startPosition = {\"None\"};", " private String [] stopPosition = {\"None\"};" ], "header": "@@ -35,6 +35,8 @@ public class RuntimeStatisticsParser {", "removed": [] }, { "added": [ " ", " startPosition = getStartPosition();", " stopPosition = getStopPosition();" ], "header": "@@ -75,6 +77,9 @@ public class RuntimeStatisticsParser {", "removed": [] }, { "added": [ " /**", " * @param tableName", 
" * @param indexName", " * @return true if passed indexName was used for Index Scan ResultSet ", " * for the passed tableName", " */", " public boolean usedConstraintForIndexScan(String tableName){", " return (statistics.indexOf(\"Index Scan ResultSet for \" + ", " tableName + \" using constraint\")!= -1);", " }" ], "header": "@@ -206,9 +211,17 @@ public class RuntimeStatisticsParser {", "removed": [ " ", "" ] }, { "added": [ " /**", " * Return whether or not the query used an equals scan qualifier.", " */", " public boolean hasEqualsQualifier() {", " return qualifiers.contains(new Qualifier(\"=\", false));", " }", " ", " /**", " * Return whether there are no qualifiers (i.e. qualifiers: None)", " */", " public boolean hasNoQualifiers() {", " int startPos = statistics.indexOf(\"qualifiers:\\n\");", " if (startPos >= 0) {", " // start search after \"qualifiers:\\n\"", " String searchString = statistics.substring(startPos + 12);", " if (searchString.indexOf(\"None\")>1)", " return true;", " else", " {", " System.out.println(\"statistics.substring: \" + searchString);", " return false;", " }", " }", " else {", " throw new AssertionError(", " \"Expected to find \\\"qualifiers: None\\\", \" +", " \"but didn't even find 'qualifiers'\");", " }", " } " ], "header": "@@ -281,8 +294,36 @@ public class RuntimeStatisticsParser {", "removed": [ " " ] }, { "added": [ " ", " /**", " * Find the start position ; sometimes using a scan start / stop is", " * a way of doing qualifiers using an index", " * @ return the String array following start position:", " */", " public String [] getStartPosition() {", " int startStartIndex = statistics.indexOf(\"start position:\");", " int endStartIndex = statistics.indexOf(\"stop position:\");", " if (startStartIndex >= 0 && endStartIndex >= 0)", " {", " String positionLines = statistics.substring(startStartIndex, endStartIndex);", " String [] startPositionLines = positionLines.split(\"\\n\");", " return startPositionLines;}", " else ", " return 
null;", " ", " }", "", " /**", " * Find the stop position ; sometimes using a scan start / stop is", " * a way of doing qualifiers using an index", " * @ return the String array following start position:", " */", " public String [] getStopPosition() {", " int startStopIndex = statistics.indexOf(\"stop position:\");", " int endStopIndex = statistics.indexOf(\"qualifiers:\");", " if (startStopIndex >= 0 && endStopIndex >= 0)", " {", " String positionLines = statistics.substring(startStopIndex, endStopIndex);", " String [] startPositionLines = positionLines.split(\"\\n\");", " return startPositionLines;}", " else ", " return null;", " }", "" ], "header": "@@ -365,5 +406,41 @@ public class RuntimeStatisticsParser {", "removed": [] } ] } ]
derby-DERBY-404-00d8393f
DERBY-404 Remove buggy code from RowUtil.isRowEmpty and a never-used parameter, and fix callers. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@293436 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/RowUtil.java", "hunks": [ { "added": [ "\t\tReturns true if row is null or row.length is zero.", "\t\t@return true if row is empty.", " DataValueDescriptor[] row) " ], "header": "@@ -243,14 +243,12 @@ public class RowUtil", "removed": [ "\t\tReturns true if row is null, row.length is null,", "\t\tor columnList is not null but has not bits set.", "\t\t@return true if no columns are selected in this row.", " DataValueDescriptor[] row, ", " FormatableBitSet columnList) " ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/btree/BTreeScan.java", "hunks": [ { "added": [ "\t\tif (RowUtil.isRowEmpty(this.init_startKeyValue))" ], "header": "@@ -239,7 +239,7 @@ public abstract class BTreeScan extends OpenBTree implements ScanManager", "removed": [ "\t\tif (RowUtil.isRowEmpty(this.init_startKeyValue, (FormatableBitSet) null))" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/conglomerate/GenericScanController.java", "hunks": [ { "added": [ "\t\tif (RowUtil.isRowEmpty(this.init_startKeyValue))" ], "header": "@@ -291,7 +291,7 @@ public abstract class GenericScanController", "removed": [ "\t\tif (RowUtil.isRowEmpty(this.init_startKeyValue, (FormatableBitSet) null))" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/heap/Heap.java", "hunks": [ { "added": [ "\t\tif (!RowUtil.isRowEmpty(startKeyValue)", "\t\t\t|| !RowUtil.isRowEmpty(stopKeyValue))" ], "header": "@@ -663,8 +663,8 @@ public final class Heap", "removed": [ "\t\tif (!RowUtil.isRowEmpty(startKeyValue, (FormatableBitSet) null)", "\t\t\t|| !RowUtil.isRowEmpty(stopKeyValue, (FormatableBitSet) null))" ] } ] } ]
derby-DERBY-4040-a4a66c4b
DERBY-4040: SQLChar.getLength returns wrong length for some data values. When asked to return the character length of a SQLChar or SQLVarchar and the value is represented as a stream, skip the two header bytes and then decode the whole stream. The cause of the bug being fixed, was that the byte length was used as the character length. Patch file: derby-4040-1a-SQLChar_length_and_test.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@742357 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/SQLChar.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.io.InputStreamUtil;" ], "header": "@@ -44,6 +44,7 @@ import org.apache.derby.iapi.jdbc.CharacterStreamDescriptor;", "removed": [] } ] } ]
derby-DERBY-4042-4e83499f
DERBY-4042: Removed double quote from file name used in test The double quote caused failures on Windows. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@740698 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4042-ccc1b8d6
DERBY-4042: org.apache.derby.impl.load.Import needs to escape single quotes Made sure that the export and import procedures properly quoted string literals and SQL identifiers when constructing internal SQL statements. Achieved by using the helper methods in StringUtil and IdUtil. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@739830 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/load/ExportResultSetForObject.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.IdUtil;" ], "header": "@@ -27,6 +27,7 @@ import java.sql.ResultSet;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/load/Import.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.IdUtil;", "import org.apache.derby.iapi.util.StringUtil;" ], "header": "@@ -21,19 +21,17 @@", "removed": [ "import java.sql.ResultSet;", "import java.sql.SQLWarning;", "import java.sql.ResultSetMetaData;", "import java.sql.DatabaseMetaData;" ] }, { "added": [ "", " sb.append(\"(\") ;", " sb.append(quoteStringArgument(inputFileName));", " sb.append(quoteStringArgument(columnDelimiter));", " sb.append(quoteStringArgument(characterDelimiter));", " sb.append(quoteStringArgument(codeset));", " sb.append(quoteStringArgument(", " columnInfo.getExpectedVtiColumnTypesAsString()));" ], "header": "@@ -206,32 +204,22 @@ public class Import extends ImportAbstract{", "removed": [ " ", " /* special handling of single quote delimiters", " * Single quote should be writeen with an extra quote otherwise sql will", " * throw syntac error.", " * i.e to recognize a quote it has to be appended with extra quote ('')", " */", " if(characterDelimiter!=null && characterDelimiter.equals(\"'\"))", " characterDelimiter = \"''\";", " if(columnDelimiter !=null && columnDelimiter.equals(\"'\"))", " columnDelimiter = \"''\";", " ", " ", " sb.append(\"(\") ; ", " sb.append(\t(inputFileName !=null ? \"'\" + inputFileName + \"'\" : null));", " sb.append(\t(columnDelimiter !=null ? \"'\" + columnDelimiter + \"'\" : null));", " sb.append(\t(characterDelimiter !=null ? \"'\" + characterDelimiter + \"'\" : null));", " sb.append(\t(codeset !=null ? 
\"'\" + codeset + \"'\" : null));", " sb.append( \"'\" + columnInfo.getExpectedVtiColumnTypesAsString() + \"'\");" ] }, { "added": [ " String entityName = IdUtil.mkQualifiedName(schemaName, tableName);" ], "header": "@@ -250,8 +238,7 @@ public class Import extends ImportAbstract{", "removed": [ " String entityName = (schemaName == null ? \"\\\"\"+ tableName + \"\\\"\" : ", " \"\\\"\" + schemaName + \"\\\"\" + \".\" + \"\\\"\" + tableName + \"\\\"\"); " ] } ] } ]
derby-DERBY-4044-c61e11be
DERBY-4044: Use helper methods from IdUtil to quote SQL identifiers in EmbedResultSet git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@890357 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.IdUtil;" ], "header": "@@ -79,6 +79,7 @@ import java.util.Calendar;", "removed": [] }, { "added": [ " insertSQL.append(IdUtil.normalToDelimited(" ], "header": "@@ -3604,7 +3605,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " insertSQL.append(quoteSqlIdentifier(" ] }, { "added": [ " updateWhereCurrentOfSQL.append(IdUtil.normalToDelimited(", " IdUtil.normalToDelimited(getCursorName()));" ], "header": "@@ -3696,14 +3697,14 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " updateWhereCurrentOfSQL.append(quoteSqlIdentifier(", " quoteSqlIdentifier(getCursorName()));" ] }, { "added": [ " IdUtil.normalToDelimited(getCursorName()));" ], "header": "@@ -3772,7 +3773,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " quoteSqlIdentifier(getCursorName()));" ] }, { "added": [ " return IdUtil.mkQualifiedName(targetTable.getSchemaName(),", " targetTable.getBaseName());" ], "header": "@@ -3808,24 +3809,10 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\tif (targetTable.getSchemaName() != null)", "\t\t\treturn quoteSqlIdentifier(targetTable.getSchemaName()) + \".\" + ", "\t\t\t\t\tquoteSqlIdentifier(targetTable.getBaseName());", "\t\telse", "\t\t\treturn quoteSqlIdentifier(targetTable.getBaseName());", " private String quoteSqlIdentifier(String orgValue) {", " int i = 0, start = 0;", " String retValue = \"\";", " while ((i = orgValue.indexOf(\"\\\"\", start) + 1) > 0) {", " retValue += orgValue.substring(start, i) + \"\\\"\";", " start = i;", " }", " retValue += orgValue.substring(start, orgValue.length());", " return \"\\\"\" + retValue + \"\\\"\";", " }", " " ] } ] } ]
derby-DERBY-4050-8f33dfd1
DERBY-4050 Multithreaded clob update causes growth in table that does not get reclaimed - Changes clob update case to call getPage() instead of getPageNoWait() so we are sure to reclaim the page. - Adds some debug statements for other places that we might not reclaim space. - Adds a test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@743023 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/data/ReclaimSpaceHelper.java", "hunks": [ { "added": [ " {", " }", " {", " // If code gets here, the space will be lost forever, and", " // can only be reclaimed by a full offline compress of the", " // table/index.", "", " if (SanityManager.DEBUG)", " {", " if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace))", " {", " SanityManager.DEBUG(", " DaemonService.DaemonTrace, ", " \" gave up after 3 tries to get container lock \" + ", " work);", " }", " }", "", " }" ], "header": "@@ -259,9 +259,28 @@ public class ReclaimSpaceHelper", "removed": [] }, { "added": [ "\t\t// We are reclaiming row space or long column. ", "\t\t// First get an xlock on the head row piece." ], "header": "@@ -281,8 +300,8 @@ public class ReclaimSpaceHelper", "removed": [ "\t\t// We are reclaiming row space or long column. First get an xlock on the", "\t\t// head row piece." ] }, { "added": [ " {", " }", " {", " // If code gets here, the space will be lost forever, and", " // can only be reclaimed by a full offline compress of the", " // table/index.", "", " if (SanityManager.DEBUG)", " {", " if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace))", " {", " SanityManager.DEBUG(", " DaemonService.DaemonTrace, ", " \" gave up after 3 tries to get row lock \" + ", " work);", " }", " }", " }" ], "header": "@@ -291,9 +310,27 @@ public class ReclaimSpaceHelper", "removed": [] } ] } ]
derby-DERBY-4053-ad40edaf
DERBY-4053 The local XA connections were not getting rolled back during Database close, and this resulted in an exception at connection close if the connection object had any pending transaction. Made changes so that all the connections (except global XA connections) will have their transactions rolled back before those connections are closed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@793588 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/Database.java", "hunks": [ { "added": [], "header": "@@ -70,7 +70,6 @@ class Database", "removed": [ "" ] }, { "added": [], "header": "@@ -80,8 +79,6 @@ class Database", "removed": [ "\tboolean forXA = false;", "" ] }, { "added": [ "\t * Database close does following cleanup tasks", "\t * 1)Rollback any pending transaction on the Connection object (except ", "\t * for a global-XA Connection obejct) before closing the Connection. ", "\t * Without the rollback, the Connection close will result into an ", "\t * exception if there is a pending transaction on that Connection.", "\t * 2)Clean up the statement table " ], "header": "@@ -336,7 +333,12 @@ class Database", "removed": [ "\t * Close the connection and clean up the statement table" ] } ] }, { "file": "java/engine/org/apache/derby/jdbc/EmbedXAConnection.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.jdbc.BrokeredConnectionControl;" ], "header": "@@ -22,6 +22,7 @@", "removed": [] } ] } ]
derby-DERBY-4054-b514f88e
DERBY-4054 ClobReclamationTest.xtestMultiThreadedUpdateTableLocking() demonstrates the problem. This does not fix the issue. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@743820 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4055-921b2648
DERBY-4059 If space reclamation cannot obtain the container lock, it will not retry getting the lock. This change makes the retry occur, but in practical application it is unlikely that we will get the lock after three retries, because the retry happens so quickly. See https://issues.apache.org/jira/browse/DERBY-4055?focusedCommentId=12673436#action_12673436 for suggestions on long-term solutions to the retry problem. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@745982 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/data/ReclaimSpaceHelper.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;" ], "header": "@@ -42,6 +42,7 @@ import org.apache.derby.iapi.store.raw.Transaction;", "removed": [] }, { "added": [ "\t\t\t\t// it is however, unlikely that three tries will be ", "\t\t\t\t// enough because there is no delay between retries.", "\t\t\t\t// See DERBY-4059 and DERBY-4055 for details." ], "header": "@@ -259,6 +260,9 @@ public class ReclaimSpaceHelper", "removed": [] }, { "added": [ "\t *\tOpen container shared no wait", "\t *", "\t * @param tran Transaction", "\t * @param rlock LockingPolicy", "\t * @param containerId container id.", "\t * ", "\t * @return ContainerHandle or null if it could not obtain lock.", "\t * ", "\t * @throws StandardException", "\t\tContainerHandle containerHdl = null;", "\t\ttry {", "\t\t\t\tcontainerHdl = tran.openContainer", "\t\t\t\t(containerId, rlock,", "\t\t\t\t\t\tContainerHandle.MODE_FORUPDATE |", "\t\t\t\t\t\tContainerHandle.MODE_LOCK_NOWAIT); ", "\t\t} catch (StandardException se) {", "\t\t\t// DERBY-4059", "\t\t\t// if this is a lock timeout just return null.", "\t\t\t// otherwise throw the exception", "\t\t\tif (!se.getSQLState().equals(SQLState.LOCK_TIMEOUT)) {", "\t\t\t\tthrow se;", "\t\t\t}", "\t\t}" ], "header": "@@ -509,17 +513,34 @@ public class ReclaimSpaceHelper", "removed": [ "\t\tOpen container shared no wait", "\t\tContainerHandle containerHdl = tran.openContainer", "\t\t\t(containerId, rlock,", "\t\t\t ContainerHandle.MODE_FORUPDATE |", "\t\t\t ContainerHandle.MODE_LOCK_NOWAIT); ", "" ] } ] } ]
derby-DERBY-4055-ad8fd168
DERBY-4055 Test case to reproduce part of the issue pertaining to not getting the row lock. To reproduce enable ClobReclamationTest.xtestMultiThreadUpdateSingleRow(). This does not fix the issue. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@743867 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4057-1bd9cd27
DERBY-4057 Space is not reclaimed if transaction is rolled back a few changes to make the ClobReclamationTest better behaved across a variety of platforms. The test tries to measure background thread behavior, and needs to allow for machines/jvms that might be single threaded and slow. In one case added a sleep after all aborts to allow the background thread to catch up and mark all pages but head one free. In other case added more to the fudge factor of how much immediate insert might progress before background thread can catch up and make free page available. If this does not work may just need to add sleep after every insert. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1641526 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4057-2678c360
DERBY-4057 Removing one check in test that is failing across platforms. Separate work under DERBY-6775 will improve the test in this area. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1643463 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4057-2cd0cc18
DERBY-4057 Space is not reclaimed if transaction is rolled back Another change to try and get test to behave well across all platforms. Still encountering some issues that I believe are because background work piles up on slow machines. Trying to address errors encountered in nightly tests: http://people.apache.org/~myrnavl/derby_test_results/main/windows/testlog/ibm16/1643981-suites.All_diff.txt http://download.java.net/javadesktop/derby/request_5600553/javadb-task-3984530.html git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1644145 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4057-888d12ea
DERBY-4057 Space is not reclaimed if transaction is rolled back. Added infrastructure, called at insert abort time, to queue post-abort work. In the case of aborted inserts, the queued work reclaims space and, if possible, marks pages free, which in turn allows them to be used by subsequent work on the table. This work is queued on heap tables when the aborted insert is the last row on a page, or when the aborted insert is a row that contains a long column (a row that is bigger than a page - usually a blob or clob) or a long row (a row that spans multiple pages). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1641418 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/conglomerate/ConglomerateFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.access.AccessFactory;", "import org.apache.derby.iapi.store.raw.PageKey;" ], "header": "@@ -27,9 +27,11 @@ import org.apache.derby.catalog.UUID;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/store/access/conglomerate/TransactionManager.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.raw.ContainerKey;" ], "header": "@@ -21,6 +21,7 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/store/raw/Page.java", "hunks": [ { "added": [ "/usr/bin/mksh: p4: not found" ], "header": "@@ -1,4 +1,5 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/store/raw/data/DataFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.raw.UndoHandler;" ], "header": "@@ -35,6 +35,7 @@ import org.apache.derby.iapi.store.raw.RawStoreFactory;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/RAMAccessManager.java", "hunks": [ { "added": [ " /* package */ Conglomerate conglomCacheFind(long conglomid)" ], "header": "@@ -464,9 +464,7 @@ public abstract class RAMAccessManager", "removed": [ " /* package */ Conglomerate conglomCacheFind(", " TransactionManager xact_mgr,", " long conglomid)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/RAMTransaction.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.raw.ContainerKey;", "import org.apache.derby.iapi.store.raw.Page;", "import org.apache.derby.iapi.store.raw.PageKey;" ], "header": "@@ -64,12 +64,12 @@ import org.apache.derby.iapi.store.access.TransactionController;", "removed": [ "", "", "" ] }, { "added": [ " conglom = accessmanager.conglomCacheFind(conglomId);" ], "header": "@@ -401,7 +401,7 @@ public class RAMTransaction", "removed": [ " conglom = accessmanager.conglomCacheFind(this, conglomId);" ] } ] }, { "file": 
"java/engine/org/apache/derby/impl/store/access/btree/index/B2IFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.access.AccessFactory;" ], "header": "@@ -35,6 +35,7 @@ import org.apache.derby.shared.common.sanity.SanityManager;", "removed": [] }, { "added": [ "import org.apache.derby.iapi.store.raw.PageKey;", "import org.apache.derby.iapi.store.raw.Transaction;" ], "header": "@@ -44,7 +45,9 @@ import org.apache.derby.iapi.store.access.TransactionController;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/heap/HeapConglomerateFactory.java", "hunks": [ { "added": [ "import org.apache.derby.impl.store.access.conglomerate.RowPosition;", "import org.apache.derby.iapi.store.access.AccessFactory;" ], "header": "@@ -32,8 +32,10 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [] }, { "added": [ "import org.apache.derby.iapi.store.raw.PageKey;", "import org.apache.derby.iapi.store.raw.Transaction;" ], "header": "@@ -42,8 +44,10 @@ import org.apache.derby.iapi.store.raw.FetchDescriptor;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/heap/HeapPostCommit.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.access.conglomerate.Conglomerate;" ], "header": "@@ -26,6 +26,7 @@ import org.apache.derby.iapi.services.daemon.Serviceable;", "removed": [] }, { "added": [ "import org.apache.derby.iapi.store.raw.ContainerKey;", "import org.apache.derby.iapi.store.raw.PageKey;" ], "header": "@@ -36,8 +37,10 @@ import org.apache.derby.iapi.store.access.RowUtil;", "removed": [] }, { "added": [ " private PageKey page_key = null;" ], "header": "@@ -74,9 +77,7 @@ class HeapPostCommit implements Serviceable", "removed": [ " private Heap heap = null;", " private long page_number = ContainerHandle.INVALID_PAGE_NUMBER;", "" ] }, { "added": [ " PageKey page_key)", " this.page_key = page_key; " ], "header": "@@ -84,12 +85,10 @@ class HeapPostCommit implements Serviceable", 
"removed": [ " Heap heap,", " long input_page_number)", " this.heap = heap; ", " this.page_number = input_page_number; " ] }, { "added": [ " * Reclaim space taken of committed deleted rows or aborted inserted rows.", " * lock. It will attempt obtain exclusive row locks on rows marked ", " * deleted, where successful those rows can be reclaimed as they must be ", " * \"committed deleted\" or \"aborted inserted\" rows." ], "header": "@@ -98,13 +97,13 @@ class HeapPostCommit implements Serviceable", "removed": [ " * Reclaim space taken up by committed deleted rows.", " * lock. It will attempt obtain exclusive row locks on deleted rows, where", " * successful those rows can be reclaimed as they must be \"committed ", " * deleted\" rows." ] }, { "added": [ "" ], "header": "@@ -127,6 +126,7 @@ class HeapPostCommit implements Serviceable", "removed": [] }, { "added": [ " \"Calling Heap removePage().; pagenumber=\" +", " pageno + \"\\n\");" ], "header": "@@ -213,7 +213,8 @@ class HeapPostCommit implements Serviceable", "removed": [ " \"Calling Heap removePage().; pagenumber=\"+pageno+\"\\n\");" ] }, { "added": [ " // if does not exist will throw exception, which the code will ", " // handle in the same way as it does heap.open failing if trying ", " // to open a dropped container.", "", " Conglomerate conglom = ", " internal_xact.findExistingConglomerateFromKey(", " page_key.getContainerId());", "", " if (SanityManager.DEBUG)", " {", " // This code can only handle Heap conglomerates.", " SanityManager.ASSERT(conglom instanceof Heap,", " \"Code expecting PageKey/ContainerKey of a Heap\");", " }", "", " Heap heap = (Heap) conglom;", "" ], "header": "@@ -328,6 +329,23 @@ class HeapPostCommit implements Serviceable", "removed": [] }, { "added": [ " purgeCommittedDeletes(", " heapcontroller, this.page_key.getPageNumber());", " // dropped or the lock was not granted.", "\t\t\t// If this exception is because lock could not be obtained, " ], "header": "@@ -346,19 +364,20 @@ class 
HeapPostCommit implements Serviceable", "removed": [ " purgeCommittedDeletes(heapcontroller, this.page_number);", " // dropper or the lock was not granted.", "\t\t\t// If this expcetion is because lock could not be obtained, " ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/RawStore.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.raw.UndoHandler;" ], "header": "@@ -48,6 +48,7 @@ import org.apache.derby.iapi.store.access.FileResource;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.store.raw.PageKey;", "import org.apache.derby.iapi.store.raw.UndoHandler;" ], "header": "@@ -56,9 +56,11 @@ import org.apache.derby.iapi.store.raw.ContainerHandle;", "removed": [] }, { "added": [ "" ], "header": "@@ -73,6 +75,7 @@ import org.apache.derby.iapi.reference.Attribute;", "removed": [] }, { "added": [ "", " // Class to use to notify upon undo of deletes", " private UndoHandler undo_handler = null;", "" ], "header": "@@ -226,6 +229,10 @@ public class BaseDataFileFactory", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BasePage.java", "hunks": [ { "added": [ "\t/** @see Page#getPageIdentity */", "\tpublic final PageKey getPageKey() {", "\t\tif (SanityManager.DEBUG) {", "\t\t\tSanityManager.ASSERT(isLatched(), \"page is not latched.\");", "\t\t\tSanityManager.ASSERT(identity != null, \"identity is null.\");", "\t\t}", "", "\t\treturn identity;", "\t}", "" ], "header": "@@ -353,6 +353,16 @@ abstract class BasePage implements Page, Observer, TypedFormat", "removed": [] }, { "added": [ " * Also used by access methods after undo of an insert.", " * <p>" ], "header": "@@ -1462,6 +1472,8 @@ abstract class BasePage implements Page, Observer, TypedFormat", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/InsertOperation.java", "hunks": [ { "added": [ " RawTransaction rxact = 
(RawTransaction)xact;", "" ], "header": "@@ -207,11 +207,12 @@ public final class InsertOperation extends LogicalPageOperation", "removed": [ "\t\t\tRawTransaction rxact = (RawTransaction)xact;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/xact/Xact.java", "hunks": [ { "added": [ "\tprotected List<Serviceable> postCommitWorks; // a list of post commit work", "\tprotected List<Serviceable> postAbortWorks; // a list of post abort work", "\tprotected List<Serviceable> postTerminationWorks; // work to be done after", "\t\t\t\t\t\t\t\t\t\t\t\t // transaction terminates,", "\t\t\t\t\t\t\t\t\t\t\t\t // commit or abort", " " ], "header": "@@ -221,10 +221,12 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ "\tprotected List<Serviceable> \t\tpostCommitWorks; // a list of post commit work", "\tprotected List<Serviceable>\t\t postTerminationWorks; // work to be done after", "\t\t\t\t\t\t\t\t\t\t\t\t // transaction terminates,", "\t\t\t\t\t\t\t\t\t\t\t\t // commit or abort" ] }, { "added": [ " " ], "header": "@@ -1023,6 +1025,7 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [] }, { "added": [ "", "\t/**", "\t\tAdd to the list of post abort work that may be processed after this", "\t\ttransaction aborts. 
", "", "\t\t@param work the post commit work that is added", "\t*/", "\tpublic void addPostAbortWork(Serviceable work)", "\t{", "\t\tif (recoveryTransaction)", "\t\t\treturn;", "", "\t\tif (postAbortWorks == null)", "\t\t\tpostAbortWorks = new ArrayList<Serviceable>(1);", "\t\tpostAbortWorks.add(work);", "\t}", "" ], "header": "@@ -1274,11 +1277,28 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [] }, { "added": [ " private void transferPostCommitorAbortWork(List <Serviceable> work_list)", " throws StandardException", " {", "\t\tif (work_list != null && !work_list.isEmpty())", "\t\t\tint pcsize = work_list.size();" ], "header": "@@ -2045,26 +2065,16 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ "", "\tprivate final void postTermination() throws StandardException", "\t{", "\t\t// move all the postTermination work to the postCommit queue", "\t\tint count = (postTerminationWorks == null) ? ", "\t\t\t0 : postTerminationWorks.size(); ", "", "\t\tfor (int i = 0; i < count; i++)", "\t\t\taddPostCommitWork(postTerminationWorks.get(i));", "", "\t\tif (count > 0)", "\t\t\tpostTerminationWorks.clear();", "", "\t\tif (postCommitWorks != null && !postCommitWorks.isEmpty())", "\t\t\tint pcsize = postCommitWorks.size();" ] }, { "added": [ "\t\t\t\t\t// to avoid confusion, copy the post commit or abort work ", " // to an array if this is going to do some work now", "\t\t\t\t\twork = (Serviceable[])work_list.toArray(work);", "\t\t\t\t\t// clear this for post commit or abort processing to queue ", " // its own post commit works - when it commits, it will ", " // send all its post commit request to the daemon instead ", " // of dealing with it here.", "\t\t\t\t\twork_list.clear();", "\t\t\t\t\t//All the post commit or abort work that is part of the ", " //database creation should be done on this thread ", " //immediately.", "\t\t\t\t\tboolean doWorkInThisThread = ", " xactFactory.inDatabaseCreation();", 
"\t\t\t\t\t\t//All the other work should be submitted to the post ", " //commit thread to be processed asynchronously", "", "\t\t\t\t\t\t\t\t// added. when that transaction commits, that", "\t\t\t\t\t\t\t\tif (work[i].performWork(xc.getContextManager())", " == Serviceable.DONE)", " {", " }" ], "header": "@@ -2073,36 +2083,43 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ "\t\t\t\t\t// to avoid confusion, copy the post commit work to an array if this", "\t\t\t\t\t// is going to do some work now", "\t\t\t\t\twork = (Serviceable[])postCommitWorks.toArray(work);", "\t\t\t\t\t// clear this for post commit processing to queue its own post", "\t\t\t\t\t// commit works - when it commits, it will send all its post", "\t\t\t\t\t// commit request to the daemon instead of dealing with it here.", "\t\t\t\t\tpostCommitWorks.clear();", "\t\t\t\t\t//All the post commit work that is part of the database creation", "\t\t\t\t\t//should be done on this thread immediately.", "\t\t\t\t\tboolean doWorkInThisThread = xactFactory.inDatabaseCreation();", "\t\t\t\t\t\t//All the other work should be submitted ", "\t\t\t\t\t\t//to the post commit thread to be processed asynchronously", "\t\t\t\t\t\t\t\t// added. when that transaction commits, those", "\t\t\t\t\t\t\t\tif (work[i].performWork(xc.getContextManager()) == Serviceable.DONE)" ] }, { "added": [ "\t\t\t\t\t\t\t\t// try to handle it here. ", " // If we fail, then let the error percolate.", "" ], "header": "@@ -2112,7 +2129,9 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ "\t\t\t\t\t\t\t\t// try to handle it here. If we fail, then let the error percolate." 
] }, { "added": [ "\t\t\t\t\t\t// Servicable is well mannered, it can change itself ", " // from serviceASAP to not serviceASAP if it returns ", " // REQUEUE.", "", "\t\t\t\t\t\t\tboolean needHelp = ", " xactFactory.submitPostCommitWork(work[i]);", "" ], "header": "@@ -2121,11 +2140,15 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ "\t\t\t\t\t\t// Servicable is well mannered, it can change itself from", "\t\t\t\t\t\t// serviceASAP to not serviceASAP if it returns REQUEUE.", "\t\t\t\t\t\t\tboolean needHelp = xactFactory.submitPostCommitWork(work[i]);" ] }, { "added": [ "\t\t\t\t\tif (work_list != null)", "\t\t\t\t\t\twork_list.clear();" ], "header": "@@ -2137,8 +2160,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ "\t\t\t\t\tif (postCommitWorks != null)", "\t\t\t\t\t\tpostCommitWorks.clear();" ] } ] } ]
derby-DERBY-4057-dbde907f
DERBY-6774 org.apache.derbyTesting.functionTests.tests.lang.AlterTableTest.testAddIdentityColumn failed with assert in nightlys Temporarily removing assert that is failing, looks like code should handle the condition anyway. Will renable after figuring out what is going on, hoping this will allow for clean runs from others while I work on this issue. My current theory is that there is a long time problem with alter table and the conglomerate cache that has been uncovered by this relatively new test and the new background work introduced by DERBY-4057. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1641753 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/heap/Heap.java", "hunks": [ { "added": [ " // DERBY-6774 - temp disabling this ASSERT while working on", " // why it is firing in trunk. The hope is that trunk tests", " // will pass for others while I work on fixing DERBY-6774.", " //", " // The code always adds the column after the last field even", " // if there is a mismatch with what is requested. The store", " // only supports adding a column at the end anyway.", " //", " // I believe the issue is some sort of timing with background", " // threads and alter table and the conglomerate cache. The", " // conglomerate cache was created assuming the data was static,", " // but in the case of alter table add and drop column this is", " // not true. There are some attempts in the access layer to", " // invalidate the cache after an abort of a alter table, but", " // I think there is still a race condition.", "", " /*" ], "header": "@@ -416,6 +416,23 @@ public class Heap", "removed": [] }, { "added": [ " */" ], "header": "@@ -430,6 +447,7 @@ public class Heap", "removed": [] } ] } ]
derby-DERBY-4057-f56c601d
DERBY-4057 Add disabled test case (xtestReclamationOnRollback() ) for the issue that space is not reclaimed on rollback.This does not fix the issue git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@744169 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4060-aeb43258
DERBY-4060: Blob.getBinaryStream(long,long) is off by one for the pos+len check. Changed the pos/length checks to allow obtaining a stream reading the last byte/char from the LOB. The JavaDoc for Blob.getBinaryStream(long,long) and Clob.getCharacterStream(long,long) (JDBC 4.0) incorrectly states that the position plus the requested length of the stream cannot be larger than the length of the LOB. Since positions in JDBC are 1-based, this makes it impossible to read the last byte/char in the LOB. Derby adhered to the spec. The changes to CharAlphabet/LoopingAlphabetReader were done to allow passing an alphabet object around for constructing streams. Patch file: derby-4060-1b-sub_stream_fix.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@749235 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/Lob.java", "hunks": [ { "added": [ " * d) (pos -1) + length > (length of LOB)" ], "header": "@@ -317,7 +317,7 @@ public abstract class Lob implements UnitOfWorkListener {", "removed": [ " * d) pos + length > (length of LOB)" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/functionTests/util/streams/LoopingAlphabetReader.java", "hunks": [ { "added": [ " this.alphabet = alphabet.getClone();" ], "header": "@@ -112,7 +112,7 @@ public class LoopingAlphabetReader", "removed": [ " this.alphabet = alphabet;" ] } ] } ]
derby-DERBY-4061-28365b04
DERBY-4061:InputStream returned from Blob.getBinaryStream(long, long) terminates the stream by returning 0, should return -1. Fixed bug where calling code would enter infinite loop because 0 was returned instead of -1 (EOF). The problem occurred because the actual number of bytes to read was calculated and the case where zero bytes remained wasn't handled properly. Patch file: derby-4061-1b.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@746236 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/UpdatableBlobStream.java", "hunks": [ { "added": [ " long remaining = maxPos - pos;", " // Return EOF if the maximum allowed position has been reached,", " // and we're trying to read at least one byte.", " if (remaining == 0 && len > 0) {", " return -1;", " }", " int actualLength = (int) Math.min(len, remaining);" ], "header": "@@ -174,7 +174,13 @@ class UpdatableBlobStream extends InputStream {", "removed": [ " int actualLength = (int) Math.min(len, maxPos - pos);" ] } ] } ]
derby-DERBY-4066-356155c5
DERBY-4066: Add test cases for large (>1M) LOB inout args. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@984916 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4066-75a4806f
DERBY-4066: Allow LOBs as function/procedure input arguments and return values. LOBs still can't be used as output and inout arguments to procedures. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@982936 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataType.java", "hunks": [ { "added": [ "import java.sql.Blob;", "import java.sql.Clob;" ], "header": "@@ -34,6 +34,8 @@ import org.apache.derby.iapi.services.sanity.SanityManager;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/DataValueDescriptor.java", "hunks": [ { "added": [ "import java.sql.Blob;", "import java.sql.Clob;" ], "header": "@@ -29,6 +29,8 @@ import org.apache.derby.iapi.services.io.Storable;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateAliasNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.JDBC30Translation;" ], "header": "@@ -32,6 +32,7 @@ import org.apache.derby.iapi.types.StringDataValue;", "removed": [] } ] } ]
derby-DERBY-4066-94df7fbd
DERBY-4066: Allow LOBs as OUT/INOUT procedure arguments. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@984393 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/net/NetCursor.java", "hunks": [ { "added": [ " if ( netResultSet_ != null ) { netResultSet_.markLOBAsPublished(column); }" ], "header": "@@ -1083,7 +1083,7 @@ public class NetCursor extends org.apache.derby.client.am.Cursor {", "removed": [ " netResultSet_.markLOBAsPublished(column);" ] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ " writer.endDdm();", " writer.endDdmAndDss();", "", " if (stmt.getExtDtaObjects() != null)", " {", " // writeScalarStream() ends the dss", " writeEXTDTA(stmt);", " }" ], "header": "@@ -4179,8 +4179,14 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t\t\twriter.endDdm();", "\t\t\t\twriter.endDdmAndDss();" ] }, { "added": [ "" ], "header": "@@ -4195,7 +4201,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t" ] }, { "added": [ "\t\t " ], "header": "@@ -4238,7 +4244,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t" ] }, { "added": [ "" ], "header": "@@ -6897,7 +6903,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t" ] }, { "added": [ " case DRDAConstants.DRDA_TYPE_NLOBBYTES:", " case DRDAConstants.DRDA_TYPE_NLOBCMIXED:", " return EXTDTAInputStream.getEXTDTAStream(cs, index, drdaType);" ], "header": "@@ -7304,6 +7310,9 @@ class DRDAConnThread extends Thread {", "removed": [] }, { "added": [ "" ], "header": "@@ -7922,7 +7931,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t\t\t " ] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/EXTDTAInputStream.java", "hunks": [ { "added": [ "import java.sql.CallableStatement;" ], "header": "@@ -24,6 +24,7 @@ import java.io.IOException;", "removed": [] }, { "added": [ " /** DRDA Type of column/parameter */", " int ndrdaType;", "", " //", " // Used when this class wraps a ResultSet", " //", "", " //", " // Used when this class wraps a CallableStatement", " //", " private Clob _clob;", " private Blob _blob;", " " ], "header": "@@ 
-48,13 +49,24 @@ class EXTDTAInputStream extends InputStream {", "removed": [ " /** DRDA Type of column */", " int ndrdaType;" ] }, { "added": [ "\tprivate EXTDTAInputStream(Clob clob, int ndrdaType ) ", " {", " _clob = clob;", " this.ndrdaType = ndrdaType;", " }", "", "\tprivate EXTDTAInputStream(Blob blob, int ndrdaType ) ", " {", " _blob = blob;", " this.ndrdaType = ndrdaType;", " }", "" ], "header": "@@ -67,6 +79,18 @@ class EXTDTAInputStream extends InputStream {", "removed": [] }, { "added": [ "\t/**", "\t * Create a new EXTDTAInputStream from a CallableStatement.", "\t * ", "\t * ", "\t * @param cs", "\t * CallableStatement from which to retrieve the lob", "\t * @param column", "\t * column number", "\t * @param drdaType", "\t * FD:OCA type of object one of", "\t * \t\t\t DRDAConstants.DRDA_TYPE_NLOBBYTES", "\t * \t\t\t DRDAConstants.DRDA_TYPE_LOBBYTES", "\t * \t\t\t DRDAConstants.DRDA_TYPE_NLOBCMIXED", "\t * \t\t DRDAConstants.DRDA_TYPE_LOBCMIXED", "\t */", "\tpublic static EXTDTAInputStream getEXTDTAStream(CallableStatement cs, int column, int drdaType)", " throws SQLException", " {", " \t ", "\t\tint ndrdaType = drdaType | 1; //nullable drdaType", "", " switch ( ndrdaType )", " {", " case DRDAConstants.DRDA_TYPE_NLOBBYTES:", " return new EXTDTAInputStream( cs.getBlob( column ), ndrdaType );", " case DRDAConstants.DRDA_TYPE_NLOBCMIXED:", " return new EXTDTAInputStream( cs.getClob( column ), ndrdaType );", " default:", " badDRDAType( ndrdaType );", "\t\t\treturn null;", " }", "\t}", "" ], "header": "@@ -102,6 +126,39 @@ class EXTDTAInputStream extends InputStream {", "removed": [] }, { "added": [ " protected boolean isEmptyStream() throws SQLException", " {", " return (length() == 0);", " }", " private long length() throws SQLException", " {", " if ( rs != null ) { return rs.getLength(columnNumber); }", " else if ( _clob != null ) { return _clob.length(); }", " else { return _blob.length(); }", " }" ], "header": "@@ -212,10 +269,16 @@ class 
EXTDTAInputStream extends InputStream {", "removed": [ " protected boolean isEmptyStream() throws SQLException{", " return (rs.getLength(columnNumber) == 0);", " ", " }" ] }, { "added": [ "\t is = getBinaryStream();", "\t if (is == null) { return; }", "\t r = getCharacterStream();" ], "header": "@@ -235,16 +298,15 @@ class EXTDTAInputStream extends InputStream {", "removed": [ "\t is = this.rs.getBinaryStream(this.columnNumber);", "\t if (is == null) ", " return;", "\t r = this.rs.getCharacterStream(this.columnNumber);" ] }, { "added": [ " else { badDRDAType( ndrdaType ); }", " private InputStream getBinaryStream() throws SQLException", " {", " if ( rs != null ) { return rs.getBinaryStream(this.columnNumber); }", " else { return _blob.getBinaryStream(); }", " }", " private Reader getCharacterStream() throws SQLException", " {", " if ( rs != null ) { return rs.getCharacterStream(this.columnNumber); }", " else { return _clob.getCharacterStream(); }", " }", " private static void badDRDAType( int drdaType )", " {", " if (SanityManager.DEBUG)", " {", " SanityManager.THROWASSERT(\"NDRDAType: \" + drdaType +", " \" not valid EXTDTA object type\");", " }", " }" ], "header": "@@ -261,20 +323,31 @@ class EXTDTAInputStream extends InputStream {", "removed": [ "\t else", "\t\t{", "\t\t if (SanityManager.DEBUG)", "\t\t\t{", "\t\t\t SanityManager.THROWASSERT(\"NDRDAType: \" + ndrdaType +", "\t\t\t\t\t\t \" not valid EXTDTA object type\");", "\t\t\t}", "\t\t}" ] }, { "added": [ " * EngineResultSet or LOB, so that it can be determined before the stream" ], "header": "@@ -283,7 +356,7 @@ class EXTDTAInputStream extends InputStream {", "removed": [ " * EngineResultSet, so that it can be determined before the stream" ] } ] } ]
derby-DERBY-4067-b3bfe123
DERBY-4067: ClientConnectionPoolDataSource.getPooledConnection and ClientXADataSource.getXAConnection ignore connection attributes DERBY-2468: would be nice if traceFile=filename connection attribute would be supported with ClientConnectionPoolDataSource and ClientXADataSource Made two changes: - moved the parsing of the connection attribute string to before the log writer is constructed, since the construction may depend on some attributes specified in the connection attribute string - added parsing of the connection attribute string to the client ConnectionPool and XA data sources Also re-enabled 'testClientMessageTextConnectionAttribute' and removed a work-around for the issue fixed by this commit. Patch file: derby-4067-1a-update_attrs.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@965793 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/jdbc/ClientConnectionPoolDataSource.java", "hunks": [ { "added": [ " updateDataSourceValues(", " tokenizeAttributes(getConnectionAttributes(), null));" ], "header": "@@ -74,6 +74,8 @@ public class ClientConnectionPoolDataSource extends ClientDataSource", "removed": [] } ] }, { "file": "java/client/org/apache/derby/jdbc/ClientDataSource.java", "hunks": [ { "added": [], "header": "@@ -25,12 +25,9 @@ import java.sql.Connection;", "removed": [ "import org.apache.derby.client.am.ClientMessageId;", "import org.apache.derby.client.net.NetConnection;", "import org.apache.derby.shared.common.error.ExceptionUtil;" ] }, { "added": [ " LogWriter dncLogWriter = null;", " try {", " updateDataSourceValues(", " tokenizeAttributes(getConnectionAttributes(), null));", " dncLogWriter = super.computeDncLogWriterForNewConnection(\"_sds\");", " return getConnectionX(dncLogWriter, getUser(), getPassword());", " } catch (SqlException se) {", " // The method below may throw an exception.", " handleConnectionException(dncLogWriter, se);", " // If the exception wasn't handled so far, re-throw it.", " throw se.getSQLException();", " }" ], "header": "@@ -162,7 +159,18 @@ public class ClientDataSource extends ClientBaseDataSource implements DataSource", "removed": [ " return getConnection(getUser(), getPassword());" ] }, { "added": [ " updateDataSourceValues(", " tokenizeAttributes(getConnectionAttributes(), null));", " return getConnectionX(dncLogWriter, user, password);" ], "header": "@@ -184,11 +192,10 @@ public class ClientDataSource extends ClientBaseDataSource implements DataSource", "removed": [ " updateDataSourceValues(tokenizeAttributes(getConnectionAttributes(), null));", " return ClientDriver.getFactory().newNetConnection", " ((NetLogWriter) dncLogWriter, user,", " password, this, -1, false);" ] } ] }, { "file": "java/client/org/apache/derby/jdbc/ClientXADataSource.java", "hunks": [ { "added": [], "header": "@@ -22,11 +22,9 @@", "removed": [ 
"import javax.sql.DataSource;", "import org.apache.derby.client.ClientXAConnection;" ] } ] } ]
derby-DERBY-4071-c00561a1
DERBY-4071 AssertFailure when selecting rows from a table with CHARACTER and VARCHAR columns Patch derby-4071 which fixes this issue. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@754579 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/GroupByNode.java", "hunks": [ { "added": [ "\t * In the query rewrite for group by, add the columns on which we are doing", "\t * the group by.", "\t *", "\t * @return havingRefsToSubstitute visitors array. Return any", "\t * havingRefsToSubstitute visitors since it is too early to apply", "\t * them yet; we need the AggregateNodes unmodified until after", "\t * we add the new columns for aggregation (DERBY-4071).", "\t *", "\tprivate ArrayList addUnAggColumns() throws StandardException" ], "header": "@@ -336,12 +336,17 @@ public class GroupByNode extends SingleChildResultSetNode", "removed": [ "\t * In the query rewrite for group by, add the columns on which", "\t * we are doing the group by.", "", "\tprivate void addUnAggColumns() throws StandardException" ] }, { "added": [ "\t\t\t// DERBY-4071 Don't substitute quite yet; we need the AggrateNodes", "\t\t\t// undisturbed until after we have had the chance to build the", "\t\t\t// other columns. 
(The AggrateNodes are shared via an alias from", "\t\t\t// aggregateVector and from the expression tree under", "\t\t\t// havingClause).", "\t\t}", "\t\treturn havingRefsToSubstitute;" ], "header": "@@ -452,10 +457,13 @@ public class GroupByNode extends SingleChildResultSetNode", "removed": [ "\t\t\tfor (int r = 0; r < havingRefsToSubstitute.size(); r++)", "\t\t\t\thavingClause.accept(", "\t\t\t\t\t(SubstituteExpressionVisitor)havingRefsToSubstitute.get(r));", "}" ] }, { "added": [ "\t\tArrayList havingRefsToSubstitute = null;", "", "\t\t\thavingRefsToSubstitute = addUnAggColumns();", "", "\t\taddAggregateColumns();", "", "", "\t\t\t// Now do the substitution of the group by expressions in the", "\t\t\t// having clause.", "\t\t\tif (havingRefsToSubstitute != null) {", "\t\t\t\tfor (int r = 0; r < havingRefsToSubstitute.size(); r++) {", "\t\t\t\t\thavingClause.accept(", "\t\t\t\t\t\t(SubstituteExpressionVisitor)havingRefsToSubstitute.get(r));", "\t\t\t\t}", "\t\t\t}", "" ], "header": "@@ -535,11 +543,26 @@ public class GroupByNode extends SingleChildResultSetNode", "removed": [ "\t\t\taddUnAggColumns();" ] }, { "added": [ "" ], "header": "@@ -565,7 +588,7 @@ public class GroupByNode extends SingleChildResultSetNode", "removed": [ "\t\taddAggregateColumns();" ] } ] } ]
derby-DERBY-4072-beac4f39
DERBY-4072 Improve error message if log files are not writable. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@749410 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/log/LogToFile.java", "hunks": [ { "added": [ "\t\t\t\t\tIOException accessException = null;" ], "header": "@@ -982,7 +982,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "" ] }, { "added": [ "\t\t\t\t\t\taccessException = ioe;" ], "header": "@@ -990,6 +990,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [] }, { "added": [ "\t\t\t\t\t\tMonitor.logTextMessage(MessageId.LOG_CHANGED_DB_TO_READ_ONLY);", "\t\t\t\t\t\tif (accessException != null)", "\t\t\t\t\t\t\tMonitor.logThrowable(accessException);" ], "header": "@@ -998,7 +999,9 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "" ] }, { "added": [ "\t\t\t\t\t\tIOException accessException = null;" ], "header": "@@ -1068,6 +1071,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [] }, { "added": [ " accessException = ioe;", "\t\t\t\t\t\t\tMonitor.logTextMessage(MessageId.LOG_CHANGED_DB_TO_READ_ONLY);", "\t\t\t\t\t\t\tif (accessException != null)", "\t\t\t\t\t\t\t\tMonitor.logThrowable(accessException);\t", "\t\t\t\t\t\t\t\t\t\t\t" ], "header": "@@ -1078,14 +1082,18 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "" ] } ] }, { "file": "java/shared/org/apache/derby/shared/common/reference/MessageId.java", "hunks": [ { "added": [ " String LOG_CHANGED_DB_TO_READ_ONLY = \"L022\"; // a permissions error on log caused us", " \t\t\t\t\t\t\t\t\t\t\t\t\t// to change db to read only.", " " ], "header": "@@ -63,6 +63,9 @@ public interface MessageId {", "removed": [] } ] } ]
derby-DERBY-4073-a7ab05c3
DERBY-4073: Creation/configuration of ClientXDataSource fails because of two setSsl methods. Removed the method setSsl(int). Added constants for the valid values for setSsl(String). Documented setSsl(String) and some other methods. Patch file: derby-4073-1a-add_docs_and_remove_setSsl_int.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@753176 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/jdbc/ClientBaseDataSource.java", "hunks": [ { "added": [ " /** The constant indicating that SSL encryption won't be used. */", " private final static String SSL_OFF_STR = \"off\";", " /** The constant indicating that SSL encryption will be used. */", " private final static String SSL_BASIC_STR = \"basic\";", " /**", " * The constant indicating that SSL encryption with peer authentication", " * will be used.", " */", " private final static String SSL_PEER_AUTHENTICATION_STR =", " \"peerAuthentication\";", " /**", " * Parses the string and returns the corresponding constant for the SSL", " * mode denoted.", " * <p>", " * Valid values are <tt>off</tt>, <tt>basic</tt> and", " * <tt>peerAuthentication</tt>.", " *", " * @param s string denoting the SSL mode", " * @return A constant indicating the SSL mode denoted by the string. If the", " * string is {@code null}, {@link #SSL_OFF} is returned.", " * @throws SqlException if the string has an invalid value", " */", "\t\t\tif (s.equalsIgnoreCase(SSL_OFF_STR)) {", " } else if (s.equalsIgnoreCase(SSL_BASIC_STR)) {", "\t\t\t} else if (s.equalsIgnoreCase(SSL_PEER_AUTHENTICATION_STR)) {", " throw new SqlException(null,", " new ClientMessageId(SQLState.INVALID_ATTRIBUTE),", " Attribute.SSL_ATTR, s, SSL_OFF_STR + \", \" +", " SSL_BASIC_STR + \", \" + SSL_PEER_AUTHENTICATION_STR);" ], "header": "@@ -173,25 +173,48 @@ public abstract class ClientBaseDataSource implements Serializable, Referenceabl", "removed": [ "\t\t\tif (s.equalsIgnoreCase(\"off\")) {", " } else if (s.equalsIgnoreCase(\"basic\")) {", "\t\t\t} else if (s.equalsIgnoreCase(\"peerAuthentication\")) {", " throw new SqlException(null, ", " new ClientMessageId(SQLState.INVALID_ATTRIBUTE),", " Attribute.SSL_ATTR, s, \"off, basic, peerAuthentication\");" ] }, { "added": [ " /**", " * Returns the SSL mode specified by the property object.", " *", " * @param properties data source properties", " * @return A constant indicating the 
SSL mode to use. Defaults to", " * {@link #SSL_OFF} if the SSL attribute isn't specified.", " * @throws SqlException if an invalid value for the SSL mode is specified", " * in the property object", " */" ], "header": "@@ -199,6 +222,15 @@ public abstract class ClientBaseDataSource implements Serializable, Referenceabl", "removed": [] }, { "added": [ " /**", " * Specifices the SSL encryption mode to use.", " * <p>", " * Valid values are <tt>off</tt>, <tt>basic</tt> and", " * <tt>peerAuthentication</tt>.", " *", " * @param mode the SSL mode to use (<tt>off</tt>, <tt>basic</tt> or", " * <tt>peerAuthentication</tt>)", " * @throws SqlException if the specified mode is invalid", " */", " /**", " * Returns the SSL encryption mode specified for the data source.", " *", " * @return <tt>off</tt>, <tt>basic</tt> or <tt>peerAuthentication</tt>.", " */", " return SSL_OFF_STR;", " return SSL_BASIC_STR;", " return SSL_PEER_AUTHENTICATION_STR;" ], "header": "@@ -877,25 +909,36 @@ public abstract class ClientBaseDataSource implements Serializable, Referenceabl", "removed": [ " public void setSsl(int mode) {", " sslMode = mode;", " }", "", " return \"off\";", " return \"basic\";", " return \"peerAuthentication\";" ] }, { "added": [ " sslMode = getClientSSLMode(prop);" ], "header": "@@ -1106,7 +1149,7 @@ public abstract class ClientBaseDataSource implements Serializable, Referenceabl", "removed": [ " setSsl(getClientSSLMode(prop));" ] } ] } ]
derby-DERBY-4079-69a192ae
DERBY-4079 Add support for SQL:2008 <result offset clause> and <fetch first clause> to limit result set cardinality Added this new feature, corresponding to patch derby-4079-3. Documentation is committed separately. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@754558 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/CursorNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.DataValueDescriptor;" ], "header": "@@ -34,6 +34,7 @@ import org.apache.derby.iapi.sql.dictionary.DataDictionary;", "removed": [] }, { "added": [ "\tprivate NumericConstantNode offset; // <result offset clause> value", "\tprivate NumericConstantNode fetchFirst; // <fetch first clause> value" ], "header": "@@ -52,6 +53,8 @@ public class CursorNode extends DMLStatementNode", "removed": [] }, { "added": [ "\t * @param offset The value of a <result offset clause> if present", "\t * @param fetchFirst The value of a <fetch first clause> if present" ], "header": "@@ -78,6 +81,8 @@ public class CursorNode extends DMLStatementNode", "removed": [] }, { "added": [ "\t\tObject offset,", "\t\tObject fetchFirst," ], "header": "@@ -92,6 +97,8 @@ public class CursorNode extends DMLStatementNode", "removed": [] }, { "added": [ "\t\tthis.offset = (NumericConstantNode)offset;", "\t\tthis.fetchFirst = (NumericConstantNode)fetchFirst;" ], "header": "@@ -99,6 +106,8 @@ public class CursorNode extends DMLStatementNode", "removed": [] }, { "added": [ "", "\t\tbindOffsetFetch();", "" ], "header": "@@ -266,6 +275,9 @@ public class CursorNode extends DMLStatementNode", "removed": [] }, { "added": [ "", "\tprivate void bindOffsetFetch() throws StandardException {", "", "\t\tif (offset != null) {", "\t\t\tDataValueDescriptor dvd = ((ConstantNode)offset).getValue();", "\t\t\tlong val = dvd.getLong();", "", "\t\t\tif (val < 0) {", "\t\t\t\tthrow StandardException.newException(", "\t\t\t\t\tSQLState.LANG_INVALID_ROW_COUNT_OFFSET,", "\t\t\t\t\tLong.toString(val) );", "\t\t\t}", "\t\t}", "", "\t\tif (fetchFirst != null) {", "\t\t\tDataValueDescriptor dvd = ((ConstantNode)fetchFirst).getValue();", "\t\t\tlong val = dvd.getLong();", "", "\t\t\tif (val < 1) {", "\t\t\t\tthrow StandardException.newException(", "\t\t\t\t\tSQLState.LANG_INVALID_ROW_COUNT_FIRST,", 
"\t\t\t\t\tLong.toString(val) );", "\t\t\t}", "\t\t}", "\t}", "", "", "" ], "header": "@@ -347,6 +359,34 @@ public class CursorNode extends DMLStatementNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/DMLStatementNode.java", "hunks": [ { "added": [ "\t{", "\t\toptimizeStatement(null, null);", "\t}", "", "\t/**", "\t * This overload variant of optimizeStatement is used by subclass", "\t * CursorNode (as well as a minion for the no-arg variant).", "\t *", "\t * @param offset Any OFFSET row count, or null", "\t * @param fetchFirst Any FETCH FIRST row count or null", "\t *", "\t * @exception StandardException\t\tThrown on error", "\t * @see DMLStatementNode#optimizeStatement()", "\t */", "\tprotected void optimizeStatement(ValueNode offset, ValueNode fetchFirst)", "\t\t\tthrows StandardException" ], "header": "@@ -298,6 +298,22 @@ abstract class DMLStatementNode extends StatementNode", "removed": [] }, { "added": [ "\t\t// Any OFFSET/FETCH FIRST narrowing must be done *after* any rewrite of", "\t\t// the query tree (if not, underlying GROUP BY fails), but *before* the", "\t\t// final scroll insensitive result node set is added - that one needs", "\t\t// to sit on top - so now is the time.", "\t\t// ", "\t\t// This example statement fails if we wrap *before* the optimization", "\t\t// above:", "\t\t// select max(a) from t1 group by b fetch first row only", "\t\t//", "\t\t// A java.sql.ResultSet#previous on a scrollable result set will fail", "\t\t// if we don't wrap *after* the ScrollInsensitiveResultSetNode below.", "\t\t//", "\t\t// We need only wrap the RowCountNode set if at least one of the", "\t\t// clauses is present.", "\t\t", "\t\tif (offset != null || fetchFirst != null) {", "\t\t\tresultSet = wrapRowCountNode(resultSet, offset, fetchFirst);", "\t\t}", "" ], "header": "@@ -306,6 +322,25 @@ abstract class DMLStatementNode extends StatementNode", "removed": [] } ] }, { "file": 
"java/engine/org/apache/derby/impl/sql/compile/ResultColumn.java", "hunks": [ { "added": [ "", "\t/**", "\t * Set the column source's table name", "\t * @param t The source table name", "\t */", "\tpublic void setSourceTableName(String t) {", "\t\tsourceTableName = t;", "\t}", "", "\t/**", "\t * Set the column source's schema name", "\t * @param s The source schema name", "\t */", "\tpublic void setSourceSchemaName(String s) {", "\t\tsourceSchemaName = s;", "\t}", "" ], "header": "@@ -816,6 +816,23 @@ public class ResultColumn extends ValueNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/RealResultSetStatisticsFactory.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.rts.RealRowCountStatistics;" ], "header": "@@ -106,6 +106,7 @@ import org.apache.derby.impl.sql.execute.rts.RealUnionResultSetStatistics;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java", "hunks": [ { "added": [ "", "\t\tProjectRestrictResultSet prRS = null;", "", "\t\t\tprRS = (ProjectRestrictResultSet)source;", "\t\t} else if (source instanceof RowCountResultSet) {", "\t\t\t// To do any projection in the presence of an intervening", "\t\t\t// RowCountResultSet, we get its child.", "\t\t\tprRS = ((RowCountResultSet)source).getUnderlyingProjectRestrictRS();", "\t\t}", "", "\t\tif (prRS != null) {", "\t\t\tnewRow = prRS.doBaseRowProjection(row);", "" ], "header": "@@ -1101,12 +1101,22 @@ public class ScrollInsensitiveResultSet extends NoPutResultSetImpl", "removed": [ "\t\t", "\t\t\tnewRow = ((ProjectRestrictResultSet)source).", "\t\t\t\tdoBaseRowProjection(row);" ] } ] }, { "file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java", "hunks": [ { "added": [ "\tString LANG_INVALID_ROW_COUNT_OFFSET = \"2201X\";", "\tString LANG_INVALID_ROW_COUNT_FIRST = \"2201W\";" ], "header": "@@ -711,7 +711,8 @@ public interface SQLState {", "removed": [ "" ] }, { "added": [ 
"\tString LANG_INTEGER_LITERAL_EXPECTED = \"42X20\";" ], "header": "@@ -816,6 +817,7 @@ public interface SQLState {", "removed": [] } ] } ]
derby-DERBY-4081-4f376647
DERBY-4081: BTreeController.comparePreviousRecord() may fail to release latch on left-most leaf When a new value is inserted into a nullable unique index, the rows immediately to the left and the right of the insertion point are checked for duplicates. comparePreviousRecord() checks the rows to the left of the insertion point. If the row immediately to the left is marked as deleted, it will have to go further to the left until it finds a row that is not deleted. Now this check may cross page boundaries, and if it does so, and all the rows to the left of the insertion point are deleted, comparePreviousRecord() will return NO_MATCH before the latch on the left-most leaf in the index has been released. This means that the left-most leaf is still latched when comparePreviousRecord() returns, which it is not supposed to be unless the new value was actually inserted into the left-most leaf. The unexpected extra latch may cause deadlocks or livelocks. This patch adds a test that runs into a livelock because of the bug, and it fixes the bug by releasing the latch on the left-most leaf before returning NO_MATCH from comparePreviousRecord(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@805696 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4083-571f201a
DERBY-4083: BTreeScan.delete() throws AM_RECORD_NOT_FOUND if record is found Changed the logic so that the exception is thrown if the record is not found. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@952131 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/unitTests/store/T_b2i.java", "hunks": [ { "added": [ "\t\t\t\tt_020(tc) &&", "\t\t\t\tt_021(tc)" ], "header": "@@ -173,7 +173,8 @@ public class T_b2i extends T_MultiIterations", "removed": [ "\t\t\t\tt_020(tc) " ] }, { "added": [ " boolean useUpdateLocks)", " int openmode = TransactionController.OPENMODE_FORUPDATE;", " if (useUpdateLocks) {", " openmode |= TransactionController.OPENMODE_USE_UPDATE_LOCKS;", " }", "", " openmode," ], "header": "@@ -434,17 +435,22 @@ public class T_b2i extends T_MultiIterations", "removed": [ " DataValueDescriptor[] template) ", " TransactionController.OPENMODE_FORUPDATE," ] }, { "added": [ " tc, create_ret.index_conglomid, delete_key, false))" ], "header": "@@ -1573,7 +1579,7 @@ public class T_b2i extends T_MultiIterations", "removed": [ " tc, create_ret.index_conglomid, delete_key, index_row.getRow()))" ] }, { "added": [ " tc, create_ret.index_conglomid, delete_key, false))" ], "header": "@@ -1584,7 +1590,7 @@ public class T_b2i extends T_MultiIterations", "removed": [ " tc, create_ret.index_conglomid, delete_key, index_row.getRow()))" ] }, { "added": [ " tc, create_ret.index_conglomid, delete_key, false))" ], "header": "@@ -1636,7 +1642,7 @@ public class T_b2i extends T_MultiIterations", "removed": [ " tc, create_ret.index_conglomid, delete_key, index_row.getRow()))" ] }, { "added": [ " delete_key, false))" ], "header": "@@ -2216,7 +2222,7 @@ public class T_b2i extends T_MultiIterations", "removed": [ " delete_key, create_ret.index_template_row))" ] }, { "added": [ " delete_key, false))" ], "header": "@@ -2284,7 +2290,7 @@ public class T_b2i extends T_MultiIterations", "removed": [ " delete_key, create_ret.index_template_row))" ] }, { "added": [ " index_row1.getRow(), false))" ], "header": "@@ -2782,7 +2788,7 @@ public class T_b2i extends T_MultiIterations", "removed": [ " index_row1.getRow(), create_ret.index_template_row))" ] }, { "added": [ " /**", " * Test latch release at 
critical time during delete on an index scan that", " * uses update locks.", " */", " protected boolean t_021(TransactionController tc)", " throws StandardException, T_Fail", " {", " REPORT(\"Starting t_021\");", "", " boolean ret_val = true;", "", " T_CreateConglomRet create_ret = new T_CreateConglomRet();", "", " // Create the btree so that it only allows 2 rows per page.", " createCongloms(tc, 2, false, false, 2, create_ret);", "", " // Open the base table", " ConglomerateController base_cc =", " tc.openConglomerate(", " create_ret.base_conglomid,", " false,", " TransactionController.OPENMODE_FORUPDATE,", " TransactionController.MODE_RECORD,", " TransactionController.ISOLATION_SERIALIZABLE);", "", " // Open the secondary index", " ConglomerateController index_cc =", " tc.openConglomerate(", " create_ret.index_conglomid,", " false,", " TransactionController.OPENMODE_FORUPDATE,", " TransactionController.MODE_RECORD,", " TransactionController.ISOLATION_SERIALIZABLE);", "", " // objects used to insert rows into base and index tables.", " DataValueDescriptor[] r1 = TemplateRow.newU8Row(2);", " T_SecondaryIndexRow index_row1 = new T_SecondaryIndexRow();", " RowLocation base_rowloc1 = base_cc.newRowLocationTemplate();", "", " index_row1.init(r1, base_rowloc1, 3);", "", " // insert one row into the table/index", " ((SQLLongint)r1[0]).setValue(1);", " ((SQLLongint)r1[1]).setValue(1);", "", " // Insert the row into the base table;remember its location.", " base_cc.insertAndFetchLocation(r1, base_rowloc1);", "", " // Insert the row into the secondary index.", " if (index_cc.insert(index_row1.getRow()) != 0)", " throw T_Fail.testFailMsg(\"insert failed\");", "", " // Commit the create of the tables.", " tc.commit();", "", " // Enable the debug code that releases the latch at critical time.", " if (SanityManager.DEBUG) {", " SanityManager.DEBUG_SET(\"BTreeScan_delete_useUpdateLocks1\");", " }", "", " // Delete the row using the index and update locks. 
Before DERBY-4083,", " // the call to delete() would fail with record not found if the latch", " // was released.", " DataValueDescriptor[] delete_key = TemplateRow.newU8Row(2);", " ((SQLLongint)delete_key[0]).setValue(1);", " ((SQLLongint)delete_key[1]).setValue(1);", " if (!t_delete(tc, create_ret.index_conglomid, delete_key, true)) {", " ret_val = false;", " }", "", " // Disable the debug code that releases the latch at critical time.", " if (SanityManager.DEBUG) {", " SanityManager.DEBUG_CLEAR(\"BTreeScan_delete_useUpdateLocks1\");", " }", "", " tc.commit();", " REPORT(\"Ending t_021\");", "", " return ret_val;", " }", "" ], "header": "@@ -4994,6 +5000,87 @@ public class T_b2i extends T_MultiIterations", "removed": [] } ] } ]
derby-DERBY-4087-14fde85b
DERBY-4087 Clean up debug printing of the abstract syntax trees after parsing, binding and optimization Reverting use of class Pattern as it is not available in Foundation 1.2, so it cannot be used in engine code. Originally committed as part of svn 808523. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@808945 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/util/StringUtil.java", "hunks": [ { "added": [], "header": "@@ -24,9 +24,6 @@ package org.apache.derby.iapi.util;", "removed": [ "import java.util.regex.Pattern;", "import java.util.regex.Matcher;", "" ] }, { "added": [], "header": "@@ -519,43 +516,4 @@ public class StringUtil", "removed": [ "", "", "\t/**", "\t * Utility for formatting which bends a multi-line string into shape for", "\t * outputting it in a context where there is <i>depth</i> tabs. Trailing", "\t * newlines are discarded as well.", "\t * <p>", "\t * Replace \"^[\\t]*\" with \"depth\" number of tabs.<br>", "\t * Replace \"\\n+$\" with \"\".", "\t * Replace all \"\\n[\\t]*\" with \"\\n\" + \"depth\" number of tabs.<br>", "\t * </p>", "\t * @param formatted string to sanitize", "\t * @param depth the string is to be printed at", "\t */", "\tpublic static String ensureIndent(String formatted, int depth) {", "\t\tStringBuffer buf = new StringBuffer();", "\t\tStringBuffer indent = new StringBuffer();", "", "\t\twhile (depth-- > 0) {", "\t\t\tindent.append(\"\\t\");", "\t\t}", "", "", "\t\tPattern pat_a = Pattern.compile(\"\\\\A\\\\t*\");", "\t\tMatcher m_a = pat_a.matcher(formatted);", "", "\t\tformatted = m_a.replaceFirst(indent.toString());", "", "\t\tPattern pat_b = Pattern.compile(\"\\\\n+\\\\Z\");", "\t\tMatcher m_b = pat_b.matcher(formatted);", "\t\tformatted = m_b.replaceFirst(\"\");", "", "\t\tPattern pat_c = Pattern.compile(\"\\\\n\\\\t*\");", "\t\tMatcher m_c = pat_c.matcher(formatted);", "\t\tformatted = m_c.replaceAll(\"\\n\" + indent.toString());", "", "", "\t\treturn formatted;", "\t}" ] } ] } ]
derby-DERBY-4087-996c8945
DERBY-4087 Clean up debug printing of the abstract syntax trees after parsing, binding and optimization Temporary roll-back of changes (808523) to the statistics printing, because they caused some old harness tests to change. It will take a while to establish correctness of new masters, so rolling back for now. Expect to roll forward again soon, though. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@808601 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealDistinctScanStatistics.java", "hunks": [ { "added": [], "header": "@@ -28,7 +28,6 @@ import org.apache.derby.catalog.UUID;", "removed": [ "import org.apache.derby.iapi.util.StringUtil;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealHashScanStatistics.java", "hunks": [ { "added": [], "header": "@@ -23,7 +23,6 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [ "import org.apache.derby.iapi.util.StringUtil;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealTableScanStatistics.java", "hunks": [ { "added": [ " /*" ], "header": "@@ -1,4 +1,4 @@", "removed": [ "/*" ] }, { "added": [], "header": "@@ -23,7 +23,6 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [ "import org.apache.derby.iapi.util.StringUtil;" ] }, { "added": [ "\t\t\t\t\": \\n\" + startPosition + ", "\t\t\t\t\": \\n\" + stopPosition +", "\t\t\t\t\":\\n\" + qualifiers + \"\\n\" +" ], "header": "@@ -218,13 +217,12 @@ public class RealTableScanStatistics", "removed": [ "\t\t\t\": \\n\" + StringUtil.ensureIndent(startPosition, depth + 2) + \"\\n\" +", "\t\t\t\": \\n\" + StringUtil.ensureIndent(stopPosition, depth + 2) + \"\\n\" +", "\t\t\t\": \\n\" + StringUtil.ensureIndent(qualifiers, depth + 2) + \"\\n\" +", "" ] } ] } ]
derby-DERBY-4087-ae848fda
DERBY-4087 Clean up debug printing of the abstract syntax trees after parsing, binding and optimization Follow-up patch, derby-4087-statistics. This fixes the wrong indentation seen in runtimestatistics for scan qualifiers. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@809632 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealDistinctScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.StringUtil;" ], "header": "@@ -28,6 +28,7 @@ import org.apache.derby.catalog.UUID;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealHashScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.StringUtil;" ], "header": "@@ -23,6 +23,7 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealTableScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.StringUtil;" ], "header": "@@ -23,6 +23,7 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] }, { "added": [ "\t\t\t\t\t\t\":\\n\" +" ], "header": "@@ -192,7 +193,7 @@ public class RealTableScanStatistics", "removed": [ "\t\t\t\t\t\t\": \\n\" +" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/RuntimeStatisticsParser.java", "hunks": [ { "added": [ "", " StringTokenizer t2 = new StringTokenizer(s, \"\\t \");", "", " if (t2.nextToken().equals(\"Operator:\")) {", " String operator = t2.nextToken();", "", "", " t2 = new StringTokenizer(s, \"\\t \");", " String neg = t2.nextToken();", "", " if (!neg.equals(\"Negate\")) {", " \"Expected to find \\\"Negate comparison result\\\", \" +", " \"found: \" + neg);", " t2.nextToken(); // skip \"comparison\"", " t2.nextToken(); // skip \"result:\"", "", " Boolean.valueOf(t2.nextToken()).booleanValue();" ], "header": "@@ -121,19 +121,31 @@ public class RuntimeStatisticsParser {", "removed": [ " if (s.startsWith(\"Operator: \")) {", " String operator = s.substring(10);", " if (!s.startsWith(\"Negate comparison result: \")) {", " \"Expected to find \\\"Negate comparison result\\\"\");", " Boolean.valueOf(s.substring(26)).booleanValue();" ] } ] } ]
derby-DERBY-4087-c9a12062
DERBY-4087 Clean up debug printing of the abstract syntax trees after parsing, binding and optimization Patch DERBY-4087-b, which improves debug-time printing of query trees. Much of the change concerns cleaning up when to use toString and when to use printSubNodes, a pattern which is well-defined but was severely broken, as well as including more data for each node, and sub-trees not hitherto printed at all. Please see JIRA for more detail. A new feature highlights aliases by suppressing printing of already printed subtrees (the query tree is a DAG, not a pure tree). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@808523 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/conn/LanguageConnectionContext.java", "hunks": [ { "added": [ "import java.util.AbstractMap;" ], "header": "@@ -48,6 +48,7 @@ import org.apache.derby.iapi.sql.execute.RunTimeStatistics;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/TableDescriptor.java", "hunks": [ { "added": [ "\t\t\tString tempString =", "\t\t\t\t\"\\n\" + \"schema: \" + schema + \"\\n\" +", "\t\t\t\t\"tableName: \" + tableName + \"\\n\" +" ], "header": "@@ -878,7 +878,9 @@ public class TableDescriptor extends TupleDescriptor", "removed": [ "\t\t\tString tempString = \"SCHEMA:\\n\" + schema + \"\\ntableName: \" + tableName + \"\\n\" +" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/util/StringUtil.java", "hunks": [ { "added": [ "import java.util.regex.Pattern;", "import java.util.regex.Matcher;", "" ], "header": "@@ -24,6 +24,9 @@ package org.apache.derby.iapi.util;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/GenericStatement.java", "hunks": [ { "added": [ "\t\t\t\t\t\tSanityManager.GET_DEBUG_STREAM().print(", "\t\t\t\t\t\t\t\"\\n\\n============PARSE===========\\n\\n\");", "\t\t\t\t\t\tlcc.getPrintedObjectsMap().clear();" ], "header": "@@ -285,7 +285,10 @@ public class GenericStatement", "removed": [] }, { "added": [ "\t\t\t\t\t\t\tSanityManager.GET_DEBUG_STREAM().print(", "\t\t\t\t\t\t\t\t\"\\n\\n============BIND===========\\n\\n\");", "\t\t\t\t\t\t\tlcc.getPrintedObjectsMap().clear();" ], "header": "@@ -320,7 +323,10 @@ public class GenericStatement", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/AlterTableNode.java", "hunks": [ { "added": [ "\t\t\t\t\"objectName: \" + getObjectName() + \"\\n\" +", "\t\t\t\t\"lockGranularity: \" + lockGranularity + \"\\n\" +", "\t\t\t\t\"compressTable: \" + compressTable + \"\\n\" +", "\t\t\t\t\"sequential: \" + sequential + \"\\n\" +", "\t\t\t\t\"truncateTable: \" + truncateTable + \"\\n\" +", 
"\t\t\t\t\"purge: \" + purge + \"\\n\" +", "\t\t\t\t\"defragment: \" + defragment + \"\\n\" +", "\t\t\t\t\"truncateEndOfTable: \" + truncateEndOfTable + \"\\n\" +", "\t\t\t\t\"updateStatistics: \" + updateStatistics + \"\\n\" +", "\t\t\t\t\"updateStatisticsAll: \" + updateStatisticsAll + \"\\n\" +", "\t\t\t\t\"indexNameForUpdateStatistics: \" +", "\t\t\t\t indexNameForUpdateStatistics + \"\\n\";" ], "header": "@@ -258,18 +258,18 @@ public class AlterTableNode extends DDLStatementNode", "removed": [ "\t\t\t\t\"objectName: \" + \"\\n\" + getObjectName() + \"\\n\" +", "\t\t\t\t\"tableElementList: \" + \"\\n\" + tableElementList + \"\\n\" +", "\t\t\t\t\"lockGranularity: \" + \"\\n\" + lockGranularity + \"\\n\" +", "\t\t\t\t\"compressTable: \" + \"\\n\" + compressTable + \"\\n\" +", "\t\t\t\t\"sequential: \" + \"\\n\" + sequential + \"\\n\" +", "\t\t\t\t\"truncateTable: \" + \"\\n\" + truncateTable + \"\\n\" +", "\t\t\t\t\"purge: \" + \"\\n\" + purge + \"\\n\" +", "\t\t\t\t\"defragment: \" + \"\\n\" + defragment + \"\\n\" +", "\t\t\t\t\"truncateEndOfTable: \" + \"\\n\" + truncateEndOfTable + \"\\n\" +", "\t\t\t\t\"updateStatistics: \" + \"\\n\" + updateStatistics + \"\\n\" +", "\t\t\t\t\"updateStatisticsAll: \" + \"\\n\" + updateStatisticsAll + \"\\n\" +", "\t\t\t\t\"indexNameForUpdateStatistics: \" + \"\\n\" + indexNameForUpdateStatistics + \"\\n\";" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CoalesceFunctionNode.java", "hunks": [ { "added": [ "\t\t\treturn", "\t\t\t\t\"functionName: \" + functionName + \"\\n\" +", "\t\t\t\t\"firstNonParameterNodeIdx: \" + firstNonParameterNodeIdx + \"\\n\" +", "\t\t\t\tsuper.toString();" ], "header": "@@ -320,7 +320,10 @@ public class CoalesceFunctionNode extends ValueNode", "removed": [ "\t\t\treturn super.toString()+functionName+\"(\"+argumentsList+\")\\n\";" ] }, { "added": [ "\t/**", "\t * Prints the sub-nodes of this object. 
See QueryTreeNode.java for", "\t * how tree printing is supposed to work.", "\t *", "\t * @param depth\t\tThe depth of this node in the tree", "\t */", "", "\tpublic void printSubNodes(int depth)", "\t{", "\t\tif (SanityManager.DEBUG)", "\t\t{", "\t\t\tsuper.printSubNodes(depth);", "", "\t\t\tprintLabel(depth, \"argumentsList: \");", "\t\t\targumentsList.treePrint(depth + 1);", "\t\t}", "\t}", "", "" ], "header": "@@ -328,6 +331,25 @@ public class CoalesceFunctionNode extends ValueNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateTableNode.java", "hunks": [ { "added": [ "\t\t\tString tempString = \"\";", "\t\t\t\ttempString = tempString +", "\t\t\t\t\t(properties != null ?", "\t\t\t\t\t \"properties: \" + \"\\n\" + properties + \"\\n\" :", "\t\t\t\t\t \"\") +", "\t\t\t\t\t\"lockGranularity: \" + lockGranularity + \"\\n\";" ], "header": "@@ -195,13 +195,17 @@ public class CreateTableNode extends DDLStatementNode", "removed": [ "\t\t\tString tempString = \"tableElementList: \" + \"\\n\" + tableElementList + \"\\n\";", "\t\t\t\ttempString = tempString + \"properties: \" + \"\\n\" + properties + \"\\n\" + \"lockGranularity: \" + \"\\n\" + lockGranularity + \"\\n\";" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/DefaultNode.java", "hunks": [ { "added": [ "\t\t\treturn \"columnName: \" + columnName + \"\\n\" +", "\t\t\t\t\"defaultText: \" + defaultText + \"\\n\" +" ], "header": "@@ -109,8 +109,8 @@ public class DefaultNode extends ValueNode", "removed": [ "\t\t\treturn \"defaultTree: \" + defaultTree + \"\\n\" +", "\t\t\t\t \"defaultText: \" + defaultText + \"\\n\" +" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/GroupByColumn.java", "hunks": [ { "added": [], "header": "@@ -51,24 +51,6 @@ public class GroupByColumn extends OrderedColumn", "removed": [ "\t/**", "\t * Convert this object to a String. 
See comments in QueryTreeNode.java", "\t * for how this should be done for tree printing.", "\t *", "\t * @return\tThis object as a String", "\t */", "\tpublic String toString() ", "\t{", "\t\tif (SanityManager.DEBUG)", "\t\t{", "\t\t\treturn \"Column Expression: \"+columnExpression+super.toString();", "\t\t}", "\t\telse", "\t\t{", "\t\t\treturn \"\";", "\t\t}", "\t}", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/GroupByList.java", "hunks": [ { "added": [], "header": "@@ -74,21 +74,6 @@ public class GroupByList extends OrderedColumnList", "removed": [ "\t/**", "\t\tPrint the list.", "", "\t\t@param depth\t\tThe depth at which to indent the sub-nodes", "\t */", "\tpublic void printSubNodes(int depth)", "\t{", "\t\tif (SanityManager.DEBUG)", "\t\t{", "\t\t\tfor (int index = 0; index < size(); index++)", "\t\t\t{", "\t\t\t\t( (GroupByColumn) elementAt(index) ).treePrint(depth);", "\t\t\t}", "\t\t}", "\t}" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/GroupByNode.java", "hunks": [ { "added": [ "\t\t\t\tsuper.toString();" ], "header": "@@ -866,7 +866,7 @@ public class GroupByNode extends SingleChildResultSetNode", "removed": [ "\t\t\t\tchildResult.toString() + \"\\n\" + super.toString();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/OrderByList.java", "hunks": [ { "added": [], "header": "@@ -111,22 +111,6 @@ public class OrderByList extends OrderedColumnList", "removed": [ "\t/**", "\t\tPrint the list.", "\t", "\t\t@param depth\t\tThe depth at which to indent the sub-nodes", "\t */", "\tpublic void printSubNodes(int depth) {", "", "\t\tif (SanityManager.DEBUG) ", "\t\t{", "\t\t\tfor (int index = 0; index < size(); index++)", "\t\t\t{", "\t\t\t\t( (OrderByColumn) (elementAt(index)) ).treePrint(depth);", "\t\t\t}", "\t\t}", "\t}", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/QueryTreeNode.java", "hunks": [ { "added": [ "import java.util.Map;" ], "header": "@@ -22,6 +22,7 @@", 
"removed": [] }, { "added": [ "\t\t\tString thisStr = formatNodeString(this.toString(), 0);", "", "\t\t\tif (containsInfo(thisStr) &&", "\t\t\t\t\t!SanityManager.DEBUG_ON(\"DumpBrief\")) {", "\t\t\t\tdebugPrint(thisStr);", "\t\t\t}", "" ], "header": "@@ -328,7 +329,13 @@ public abstract class QueryTreeNode implements Visitable", "removed": [ "\t\t\tdebugPrint(formatNodeString(this.toString(), 0));" ] }, { "added": [ "\t\t\tMap printed =", "\t\t\t\tgetLanguageConnectionContext().getPrintedObjectsMap();", "", "\t\t\tif (printed.containsKey(this)) {", "\t\t\t\tdebugPrint(formatNodeString(nodeHeader(), depth));", "\t\t\t\tdebugPrint(formatNodeString(\"***truncated***\\n\", depth));", "\t\t\t} else {", "\t\t\t\tprinted.put(this, null);", "\t\t\t\tdebugPrint(formatNodeString(nodeHeader(), depth));", "\t\t\t\tString thisStr = formatNodeString(this.toString(), depth);", "", "\t\t\t\tif (containsInfo(thisStr) &&", "\t\t\t\t\t\t!SanityManager.DEBUG_ON(\"DumpBrief\")) {", "\t\t\t\t\tdebugPrint(thisStr);", "\t\t\t\t}", "", "\t\t\t\tprintSubNodes(depth);", "\t\t\t}", "", "", "\tprivate static boolean containsInfo(String str) {", "\t\tfor (int i = 0; i < str.length(); i++) {", "\t\t\tif (str.charAt(i) != '\\t' && str.charAt(i) != '\\n') {", "\t\t\t\treturn true;", "\t\t\t}", "\t\t}", "\t\treturn false;", "\t}", "" ], "header": "@@ -347,12 +354,38 @@ public abstract class QueryTreeNode implements Visitable", "removed": [ "\t\t\tdebugPrint(formatNodeString(nodeHeader(), depth));", "\t\t\tdebugPrint(formatNodeString(this.toString(), depth));", "\t\t\tprintSubNodes(depth);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumnList.java", "hunks": [ { "added": [], "header": "@@ -665,24 +665,6 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ "\t/**", "\t * This class needs a treePrint method, even though it is not a", "\t * descendant of QueryTreeNode, because its members contain tree", "\t * nodes, and these have to be printed and 
indented properly.", "\t *", "\t * @param depth\t\tThe depth at which to indent the sub-nodes", "\t */", "", "\tpublic void treePrint(int depth)", "\t{", "\t\tif (SanityManager.DEBUG)", "\t\t{", "\t\t\tfor (int index = 0; index < size(); index++)", "\t\t\t{", "\t\t\t\t((ResultColumn) elementAt(index) ).treePrint(depth);", "\t\t\t}", "\t\t}", "\t}" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SelectNode.java", "hunks": [ { "added": [], "header": "@@ -162,10 +162,6 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\t \t\"groupByList: \" +", "\t\t\t\t(groupByList != null ? groupByList.toString() : \"null\") + \"\\n\" +", "\t\t\t\t\"orderByList: \" + ", "\t\t\t\t(orderByList != null ? orderByList.toString() : \"null\") + \"\\n\" +" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SetOperatorNode.java", "hunks": [ { "added": [], "header": "@@ -529,8 +529,6 @@ abstract class SetOperatorNode extends TableOperatorNode", "removed": [ "\t\t\t\t\"orderByList: \" + ", "\t\t\t\t(orderByList != null ? 
orderByList.toString() : \"null\") + \"\\n\" +" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/TableOperatorNode.java", "hunks": [ { "added": [], "header": "@@ -238,8 +238,6 @@ abstract class TableOperatorNode extends FromTable", "removed": [ "\t\t\t\tleftResultSet.toString() + \"\\n\" +", "\t\t\t\trightResultSet.toString() + \"\\n\" + " ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ "import java.util.AbstractMap;", "import java.util.IdentityHashMap;" ], "header": "@@ -82,6 +82,8 @@ import org.apache.derby.iapi.reference.Property;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/IndexColumnOrder.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.sanity.SanityManager;", "" ], "header": "@@ -23,6 +23,8 @@ package org.apache.derby.impl.sql.execute;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealDistinctScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.StringUtil;" ], "header": "@@ -28,6 +28,7 @@ import org.apache.derby.catalog.UUID;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealHashScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.StringUtil;" ], "header": "@@ -23,6 +23,7 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealTableScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.StringUtil;" ], "header": "@@ -23,6 +23,7 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] }, { "added": [ "\t\t\t\": \\n\" + StringUtil.ensureIndent(startPosition, depth + 2) + \"\\n\" +", "\t\t\t\": \\n\" + StringUtil.ensureIndent(stopPosition, depth + 2) + \"\\n\" +", "\t\t\t\": \\n\" + StringUtil.ensureIndent(qualifiers, depth + 2) + \"\\n\" +", "" ], "header": "@@ 
-217,12 +218,13 @@ public class RealTableScanStatistics", "removed": [ "\t\t\t\t\": \\n\" + startPosition + ", "\t\t\t\t\": \\n\" + stopPosition +", "\t\t\t\t\":\\n\" + qualifiers + \"\\n\" +" ] } ] } ]
derby-DERBY-4088-7062abe7
DERBY-4088: DDMReader readBytes ArrayIndexOutOfBoundsException When ensureBLayerDataInBuffer() calls compressBLayerData() the number of bytes in the buffer is reduced (header bytes are stripped out) and there might be too little data in the buffer when it returns. This patch fixes the problem by ensuring that compressBLayerData() fills the buffer with enough extra bytes to compensate for the removal of the header bytes. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@752813 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/DDMReader.java", "hunks": [ { "added": [ " // Offset from the start of the valid region of the byte buffer,", " // pointing to the start of the DSS we're looking at.", " int tempOffset = 0;", "", "\t\t\t\ttempOffset = dssLength;", "\t\t\t\ttempOffset += DssConstants.MAX_DSS_LENGTH;" ], "header": "@@ -1695,21 +1695,23 @@ class DDMReader", "removed": [ "\t\t", "\t\tint tempPos = 0;", "\t\t\t\ttempPos = pos + dssLength;", "\t\t\t\ttempPos += DssConstants.MAX_DSS_LENGTH;" ] }, { "added": [ " // Get the length of the DSS. Make sure that we have enough data", " // in the buffer to actually see the length (may not have enough", " // bytes if this is not the first DSS).", " ensureALayerDataInBuffer(tempOffset + 1);", " continueHeaderLength =", " ((buffer[pos + tempOffset] & 0xff) << 8) +", " (buffer[pos + tempOffset + 1] & 0xff);" ], "header": "@@ -1725,8 +1727,13 @@ class DDMReader", "removed": [ "\t\t\tcontinueHeaderLength = ((buffer[tempPos] & 0xff) << 8) +", "\t\t\t\t((buffer[tempPos + 1] & 0xff) << 0);" ] }, { "added": [ "", " // Make sure we have all of the last DSS in the buffer", " // (DERBY-4088). Since we look at the last DSS first,", " // we don't need to do this for the other DSSs, as they", " // will also be fetched into the buffer when we fetch the", " // last one.", " ensureALayerDataInBuffer(tempOffset + continueHeaderLength);" ], "header": "@@ -1746,6 +1753,13 @@ class DDMReader", "removed": [] } ] } ]
derby-DERBY-4088-f582a777
DERBY-4088: DDMReader readBytes ArrayIndexOutOfBoundsException Reverted the previous fix (revision 752813) in compressBLayerData() and instead changed ensureBLayerDataInBuffer() to match the corresponding method in Reply.java on the client side. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@755866 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/DDMReader.java", "hunks": [ { "added": [ " if (dssIsContinued && (desiredDataSize > dssLength)) {", " // The data that we want is split across multiple DSSs", " int continueDssHeaderCount =", " (desiredDataSize - dssLength) / DssConstants.MAX_DSS_LENGTH + 1;", " // Account for the extra header bytes (2 length bytes per DSS)", " ensureALayerDataInBuffer(", " desiredDataSize + 2 * continueDssHeaderCount);", " compressBLayerData(continueDssHeaderCount);", " } else {", " ensureALayerDataInBuffer(desiredDataSize);", " }", "" ], "header": "@@ -1669,16 +1669,18 @@ class DDMReader", "removed": [ "\t\tensureALayerDataInBuffer (desiredDataSize);", "\t\tif (dssIsContinued) ", "\t\t{", "\t\t\tif (desiredDataSize > dssLength) ", "\t\t\t{", "\t\t\t\tint continueDssHeaderCount =", "\t\t\t\t\t(((desiredDataSize - dssLength) / DssConstants.MAX_DSS_LENGTH) + 1);", "\t\t\t\tcompressBLayerData (continueDssHeaderCount);", "\t\t\t}", "\t\t}" ] }, { "added": [ "\t\t", "\t\tint tempPos = 0;", "\t\t\t\ttempPos = pos + dssLength;", "\t\t\t\ttempPos += DssConstants.MAX_DSS_LENGTH;" ], "header": "@@ -1695,23 +1697,21 @@ class DDMReader", "removed": [ " // Offset from the start of the valid region of the byte buffer,", " // pointing to the start of the DSS we're looking at.", " int tempOffset = 0;", "", "\t\t\t\ttempOffset = dssLength;", "\t\t\t\ttempOffset += DssConstants.MAX_DSS_LENGTH;" ] }, { "added": [ "\t\t\tcontinueHeaderLength = ((buffer[tempPos] & 0xff) << 8) +", "\t\t\t\t((buffer[tempPos + 1] & 0xff) << 0);" ], "header": "@@ -1727,13 +1727,8 @@ class DDMReader", "removed": [ " // Get the length of the DSS. 
Make sure that we have enough data", " // in the buffer to actually see the length (may not have enough", " // bytes if this is not the first DSS).", " ensureALayerDataInBuffer(tempOffset + 1);", " continueHeaderLength =", " ((buffer[pos + tempOffset] & 0xff) << 8) +", " (buffer[pos + tempOffset + 1] & 0xff);" ] }, { "added": [], "header": "@@ -1753,13 +1748,6 @@ class DDMReader", "removed": [ "", " // Make sure we have all of the last DSS in the buffer", " // (DERBY-4088). Since we look at the last DSS first,", " // we don't need to do this for the other DSSs, as they", " // will also be fetched into the buffer when we fetch the", " // last one.", " ensureALayerDataInBuffer(tempOffset + continueHeaderLength);" ] }, { "added": [ "\t\t\ttempPos -= (bytesToShift - 2);", "\t\t\tSystem.arraycopy(buffer, tempPos - shiftSize, buffer, tempPos,", "\t\t\t\t\t\t\t bytesToShift);", "\t\tpos = tempPos;" ], "header": "@@ -1798,12 +1786,12 @@ class DDMReader", "removed": [ "\t\t\ttempOffset -= (bytesToShift - 2);", "\t\t\tSystem.arraycopy(buffer, pos + tempOffset - shiftSize,", " buffer, pos + tempOffset, bytesToShift);", "\t\tpos += tempOffset;" ] } ] } ]
derby-DERBY-4092-425fcc91
DERBY-4092: Don't allow invocations of table functions in contexts which expect a scalar function return value. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@805443 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/JavaToSQLValueNode.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.TypeDescriptor;" ], "header": "@@ -21,6 +21,7 @@", "removed": [] } ] } ]
derby-DERBY-4095-c7e05da0
DERBY-4095 Trigger in fails with ERROR 38000: The exception 'java.sql.SQLException: ResultSet not open during VTIResultSet.getNextRowCore() Reinitialize the ResultSet on executeQuery of TriggerNewTransitionRows and TriggerOldTransitionRows so that for a nested loop join, reopening the ResultSet does not result in an exception. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@756516 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/TriggerNewTransitionRows.java", "hunks": [ { "added": [ "\t\tinitializeResultSet();", "\t}", "", "\tprivate ResultSet initializeResultSet() throws SQLException {", "\t\tif (resultSet != null)", "\t\t\tresultSet.close();", "\t\t" ], "header": "@@ -62,6 +62,13 @@ public final class TriggerNewTransitionRows extends org.apache.derby.vti.Updatab", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/catalog/TriggerOldTransitionRows.java", "hunks": [ { "added": [ "\t\tinitializeResultSet();", "\t}", "", "\tprivate ResultSet initializeResultSet() throws SQLException {", "\t\tif (resultSet != null)", "\t\t\tresultSet.close();", "\t\t" ], "header": "@@ -63,6 +63,13 @@ public class TriggerOldTransitionRows extends org.apache.derby.vti.UpdatableVTIT", "removed": [] } ] } ]
derby-DERBY-4097-465c7c7d
DERBY-4097: BatchUpdateException in NullableUniqueConstraintTest Improve error reporting when a BatchUpdateException is thrown by the client driver. Use initCause() to put the underlying exception in the chain of exceptions printed by printStackTrace(). Note that there may be more than one underlying exception for a BatchUpdateException and only one of them will be printed by printStackTrace(). To see the rest of the exceptions, getNextException() has to be used (as before). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@755147 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/BatchUpdateException.java", "hunks": [ { "added": [ " Object[] args, int[] updateCounts, SqlException cause)" ], "header": "@@ -37,7 +37,7 @@ public class BatchUpdateException extends java.sql.BatchUpdateException {", "removed": [ " Object[] args, int[] updateCounts)" ] } ] } ]
derby-DERBY-4102-e0939287
DERBY-4102: Assert failure or ClassCastException in EmbedBlob when retrieving BLOB >= 32K. Made Derby store the stream content to a temporary location when it knows the stream isn't resetable. Small streams (< 32K) will be stored in memory, larger streams will be written to disk (the switch happens transparently). Added the repro as a regression test. Patch file: derby-4102-1a.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@835286 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedBlob.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.RawToBinaryFormatStream;" ], "header": "@@ -27,6 +27,7 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [] }, { "added": [ " /*", " We support three scenarios at this point:", " a) The Blob value is already represented as bytes in memory.", " This is the case for small Blobs (less than 32 KB).", " b) The Blob value is represented as a resetable stream.", " This is the case for Blobs coming from the store", " (note the comment about SQLBit below).", " c) The Blob value is represented as a wrapped user stream.", " This stream cannot be reset, which means we have to drain the", " stream and store it temporarily until it is either discarded or", " inserted into the database.", " */", " if (dvdStream == null) { // a) Blob already materialized in memory" ], "header": "@@ -159,9 +160,20 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " if (dvdStream == null)", " {" ] }, { "added": [ " } else if (dvdStream instanceof Resetable) { // b) Resetable stream" ], "header": "@@ -177,9 +189,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " }", " else", " {" ] } ] } ]
derby-DERBY-4110-190e5231
DERBY-4110: Deleting from a table with its synonym throws an exception When building the ResultColumnList that is used to tie together the search results from the WHERE clause with the processing of the DELETE, the result columns will be bound against the exposed table name of the table named in the DELETE statement, so if the statement used a synonymTableName, then the result column list should manufacture column references which use a synonymTableName as well. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@896722 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4115-a6a07338
DERBY-4115 Provide a way to drop statistics information The details of all the changes in this commit are listed below. 1)Added a new routine SYSCS_DROP_STATISTICS, with public access similar to SYSCS_UPDATE_STATISTICS. This happens in DataDictionaryImpl, where SYSCS_DROP_STATISTICS is added to the list of public access procedures in sysUtilProceduresWithPublicAccess 2)The new stored procedure implementation is similar to update statistics, ie allow the routine to go through ALTER TABLE where permission/privilege checking, table/schema/index name validations happen automatically and we implement the routine logic through extension of ALTER TABLE syntax. This new syntax for ALTER TABLE syntax(same as we did for update statistics) is an internal syntax only and won't be available to an end user directly. 3)This commit changes sqlgrammar.jj to recognize the following internal syntaxes for ALTER TABLE a)ALTER TABLE tablename ALL DROP STATISTICS The existing(corresponding syntax) for update statistics is as follows ALTER TABLE tablename ALL UPDATE STATISTICS b)ALTER TABLE tablename STATISTICS DROP indexname The existing(corresponding syntax) for update statistics is as follows ALTER TABLE tablename UPDATE STATISTICS indexname Notice the two syntaxes for index level statistics are different for drop vs update.(the reason for the syntax difference is explained above) 4)After the statistics are dropped, we send invalidation signal to dependent statements so they would get recompiled when they are executed next time. This will make sure that they pick the correct plan given the statistics for the table. 5)The commit takes care of some of the test failures(expected failures because of the addition of a new system procedure). 6)The commit adds basic upgrade test for the new procedure. This test ensures that drop statistics procedure is available only after hard upgrade. 
7)While writing the upgrade tests, I found that a meaningful test for drop statistics could only be written for Derby releases 10.5 and higher. We have found that when constraints end up sharing same backing index, Derby won't create statistics for them. This is issue DERBY-5702. But if we run update statistics on that constraint, we will be able to get the statistics for such a constraint. Later, when the constraint is dropped, because of DERBY-5681, the statistics row for such a constraint(one that shares it's backing index with another constraint) is never dropped. We can use drop statistics procedure introduced in this jira to take care of such hanging indexes. But since update statistics procedure is only available in 10.5 and higher, I couldn't demonstrate use of drop statistics to drop hanging statistics rows. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1338017 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/SystemProcedures.java", "hunks": [ { "added": [ "\t * @exception SQLException" ], "header": "@@ -733,7 +733,7 @@ public class SystemProcedures {", "removed": [ "\t * @exception StandardException Standard exception policy." ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ "\t\t\t\t\t\t\t\t\t\t\t\t\"SYSCS_DROP_STATISTICS\", " ], "header": "@@ -464,6 +464,7 @@ public final class\tDataDictionaryImpl", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/AlterTableNode.java", "hunks": [ { "added": [ "\t * dropStatistics will indicate that we are here for dropping the", "\t * statistics. It could be statistics of just one index or all the", "\t * indexes on a given table. ", "\t */", "\tprivate\t boolean\t\t\t\t\t dropStatistics;", "\t/**", "\t * The flag dropStatisticsAll will tell if we are going to drop the ", "\t * statistics of all indexes or just one index on a table. ", "\t */", "\tprivate\t boolean\t\t\t\t\t dropStatisticsAll;", "\t/**", "\t * If statistic is getting updated/dropped for just one index, then ", "\t * indexNameForStatistics will tell the name of the specific index ", "\t * whose statistics need to be updated/dropped.", "\tprivate\tString\t\t\t\tindexNameForStatistics;" ], "header": "@@ -70,11 +70,22 @@ public class AlterTableNode extends DDLStatementNode", "removed": [ "\t * If statistic is getting updated for just one index, then ", "\t * indexNameForUpdateStatistics will tell the name of the specific index ", "\t * whose statistics need to be updated.", "\tprivate\tString\t\t\t\tindexNameForUpdateStatistics;" ] }, { "added": [], "header": "@@ -116,33 +127,6 @@ public class AlterTableNode extends DDLStatementNode", "removed": [ "", "\t/**", "\t * Initializer for a AlterTableNode for updating the statistics. 
The user", "\t * can ask for update statistic of all the indexes or only a specific index", "\t *", "\t * @param objectName\t\tThe name of the table whose index(es) will have", "\t * their statistics updated.", "\t * @param updateStatisticsAll\tIf true then update the statistics of all ", "\t * the indexes on the table. If false, then update", "\t * the statistics of only the index provided as", "\t * 3rd parameter here", "\t * @param indexName\t\t\tOnly used if updateStatisticsAll is set to ", "\t * false. ", "\t *", "\t * @exception StandardException\t\tThrown on error", "\t */", "\tpublic void init(Object objectName,", "\t\t\tObject updateStatisticsAll,", "\t\t\tObject indexName)", "\tthrows StandardException", "\t{", "\t\tinitAndCheck(objectName);", "\t\tthis.updateStatisticsAll = ((Boolean) updateStatisticsAll).booleanValue();", "\t\tthis.indexNameForUpdateStatistics = (String)indexName;", "\t\tschemaDescriptor = getSchemaDescriptor();", "\t\tupdateStatistics = true;", "\t}" ] }, { "added": [ "\t * Initializer for a AlterTableNode. The parameter values have different", "\t * meanings based on what kind of ALTER TABLE is taking place. ", "\t * ", "\t * @param changeType\t\tADD_TYPE or DROP_TYPE or UPDATE_STATISTICS or", "\t * or DROP_STATISTICS", "\t * @param param1 \t\t\tFor ADD_TYPE or DROP_TYPE, param1 gives the", "\t * elements impacted by ALTER TABLE.", "\t * For UPDATE_STATISTICS or or DROP_STATISTICS,", "\t * param1 is boolean - true means update or drop", "\t * the statistics of all the indexes on the table.", "\t * False means, update or drop the statistics of", "\t * only the index name provided by next parameter.", "\t * @param param2 \t\t\tFor ADD_TYPE or DROP_TYPE, param2 gives the", "\t * new lock granularity, if any", "\t * For UPDATE_STATISTICS or DROP_STATISTICS,", "\t * param2 can be the name of the specific index", "\t * whose statistics will be dropped/updated. 
This", "\t * param is used only if param1 is set to false", "\t * @param param3\t\t\tFor DROP_TYPE, param3 can indicate if the drop", "\t * column is CASCADE or RESTRICTED. This param is", "\t * ignored for all the other changeType.", "\t\t\t\t\t\t\tObject param1,", "\t\t\t\t\t\t\tObject param2,", "\t\t\t\t\t\t\tObject param3 )", "\t\t", "\t\tint[]\tct = (int[]) changeType;", "\t\t", "\t\t\t\tthis.tableElementList = (TableElementList) param1;", "\t\t\t\tthis.lockGranularity = ((Character) param2).charValue();", "\t\t\t\tint[]\tbh = (int[]) param3;", "\t\t\t\tthis.behavior = bh[0];", "\t\t\t\tbreak;", "", "\t\t case UPDATE_STATISTICS:", "\t\t\t\tthis.updateStatisticsAll = ((Boolean) param1).booleanValue();", "\t\t\t\tthis.indexNameForStatistics = (String)param2;", "\t\t\t\tupdateStatistics = true;", "\t\t\t\tbreak;", "\t\t case DROP_STATISTICS:", "\t\t\t\tthis.dropStatisticsAll = ((Boolean) param1).booleanValue();", "\t\t\t\tthis.indexNameForStatistics = (String)param2;", "\t\t\t\tdropStatistics = true;" ], "header": "@@ -194,39 +178,67 @@ public class AlterTableNode extends DDLStatementNode", "removed": [ "\t * Initializer for a AlterTableNode", "\t *", "\t * @param tableElementList\tThe alter table action", "\t * @param lockGranularity\tThe new lock granularity, if any", "\t * @param changeType\t\tADD_TYPE or DROP_TYPE", "\t * @param behavior\t\t\tIf drop column is CASCADE or RESTRICTED", "", "\t\t\t\t\t\t\tObject tableElementList,", "\t\t\t\t\t\t\tObject lockGranularity,", "\t\t\t\t\t\t\tObject behavior )", "\t\tthis.tableElementList = (TableElementList) tableElementList;", "\t\tthis.lockGranularity = ((Character) lockGranularity).charValue();", "\t\tint[]\tct = (int[]) changeType, bh = (int[]) behavior;", "\t\tthis.behavior = bh[0];" ] }, { "added": [ "\t\t\t\t\"dropStatistics: \" + dropStatistics + \"\\n\" +", "\t\t\t\t\"dropStatisticsAll: \" + dropStatisticsAll + \"\\n\" +", "\t\t\t\t\"indexNameForStatistics: \" +", "\t\t\t\tindexNameForStatistics + \"\\n\";" 
], "header": "@@ -259,8 +271,10 @@ public class AlterTableNode extends DDLStatementNode", "removed": [ "\t\t\t\t\"indexNameForUpdateStatistics: \" +", "\t\t\t\t indexNameForUpdateStatistics + \"\\n\";" ] }, { "added": [ "\t\t//Check if we are in alter table to update/drop the statistics. If yes,", "\t\t// then check if we are here to update/drop the statistics of a specific", "\t\t// index. If yes, then verify that the indexname provided is a valid one.", "\t\tif ((updateStatistics && !updateStatisticsAll) || (dropStatistics && !dropStatisticsAll))", "\t\t\t\tcd = dd.getConglomerateDescriptor(indexNameForStatistics, schemaDescriptor, false);", "\t\t\t\t\t\tschemaDescriptor.getSchemaName() + \".\" + indexNameForStatistics);" ], "header": "@@ -433,20 +447,20 @@ public String statementToString()", "removed": [ "\t\t//Check if we are in alter table to update the statistics. If yes, then", "\t\t//check if we are here to update the statistics of a specific index. If", "\t\t//yes, then verify that the indexname provided is a valid one.", "\t\tif (updateStatistics && !updateStatisticsAll)", "\t\t\t\tcd = dd.getConglomerateDescriptor(indexNameForUpdateStatistics, schemaDescriptor, false);", "\t\t\t\t\t\tschemaDescriptor.getSchemaName() + \".\" + indexNameForUpdateStatistics);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java", "hunks": [ { "added": [ "\t * dropStatistics will indicate that we are here for dropping the", "\t * statistics. It could be statistics of just one index or all the", "\t * indexes on a given table. ", "\t */", " private\t boolean\t\t\t\t\t dropStatistics;", "\t/**", "\t * The flag dropStatisticsAll will tell if we are going to drop the ", "\t * statistics of all indexes or just one index on a table. 
", "\t */", " private\t boolean\t\t\t\t\t dropStatisticsAll;", "\t/**", "\t * If statistic is getting updated/dropped for just one index, then ", "\t * indexNameForStatistics will tell the name of the specific index ", "\t * whose statistics need to be updated/dropped.", " private\t String\t\t\t\t\t\tindexNameForStatistics;", "" ], "header": "@@ -131,11 +131,23 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t * If statistic is getting updated for just one index, then ", "\t * indexNameForUpdateStatistics will tell the name of the specific index ", "\t * whose statistics need to be updated.", " private\t String\t\t\t\t\t\tindexNameForUpdateStatistics;" ] }, { "added": [ "\t * @param dropStatistics\t\tTRUE means we are here to drop statistics", "\t * @param dropStatisticsAll\tTRUE means we are here to drop statistics", "\t * \tof all the indexes. False means we are here to drop statistics of", "\t * \tonly one index.", "\t * @param indexNameForStatistics\tWill name the index whose statistics", "\t * \twill be updated/dropped. This param is looked at only if ", "\t * \tupdateStatisticsAll/dropStatisticsAll is set to false and", "\t * \tupdateStatistics/dropStatistics is set to true." 
], "header": "@@ -196,8 +208,14 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t * @param indexNameForUpdateStatistics\tWill name the index whose statistics", "\t * \twill be updated" ] }, { "added": [ " boolean dropStatistics,", " boolean dropStatisticsAll,", " String indexNameForStatistics)" ], "header": "@@ -217,7 +235,9 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ " String\t indexNameForUpdateStatistics)" ] }, { "added": [ "\t\tthis.dropStatistics \t= dropStatistics;", "\t\tthis.dropStatisticsAll = dropStatisticsAll;", "\t\tthis.indexNameForStatistics = indexNameForStatistics;" ], "header": "@@ -236,7 +256,9 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t\tthis.indexNameForUpdateStatistics = indexNameForUpdateStatistics;" ] }, { "added": [ "", " if (dropStatistics) {", " dropStatistics();", " return;", "\t\t}" ], "header": "@@ -330,6 +352,11 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [] }, { "added": [ "\t/**", "\t * Drop statistics of either all the indexes on the table or only one", "\t * specific index depending on what user has requested.", "\t * ", "\t * @throws StandardException", "\t */", " private void dropStatistics()", " throws StandardException {", " td = dd.getTableDescriptor(tableId);", "", " dd.startWriting(lcc);", " dm.invalidateFor(td, DependencyManager.UPDATE_STATISTICS, lcc);", "", " if (dropStatisticsAll) {", " dd.dropStatisticsDescriptors(td.getUUID(), null, tc);", " } else {", " ConglomerateDescriptor cd = ", " dd.getConglomerateDescriptor(", " indexNameForStatistics, sd, false);", " dd.dropStatisticsDescriptors(td.getUUID(), cd.getUUID(), tc);", " }", " }", "" ], "header": "@@ -649,6 +676,29 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/GenericConstantActionFactory.java", 
"hunks": [ { "added": [ "\t * @param dropStatistics\t\tTRUE means we are here to drop statistics", "\t * @param dropStatisticsAll\tTRUE means we are here to drop statistics", "\t * \tof all the indexes. False means we are here to drop statistics of", "\t * \tonly one index.", "\t * @param indexNameForStatistics\tWill name the index whose statistics", "\t * \twill be updated/dropped. This param is looked at only if ", "\t * \tupdateStatisticsAll/dropStatisticsAll is set to false and", "\t * \tupdateStatistics/dropStatistics is set to true.", "\t * ." ], "header": "@@ -137,9 +137,15 @@ public class GenericConstantActionFactory", "removed": [ "\t * @param indexNameForUpdateStatistics\tWill name the index whose statistics", "\t * \twill be updated. This param is looked at only if updateStatisticsAll", "\t * \tis set to false." ] }, { "added": [ "\t\tboolean\t\t\t\t\t\tdropStatistics,", "\t\tboolean\t\t\t\t\t\tdropStatisticsAll,", "\t\tString\t\t\t\t\t\tindexNameForStatistics" ], "header": "@@ -161,7 +167,9 @@ public class GenericConstantActionFactory", "removed": [ "\t\tString\t\t\t\t\t\tindexNameForUpdateStatistics" ] } ] } ]
derby-DERBY-4115-c1e0f8ee
DERBY-4115/DERBY-5681: Provide a way to drop statistics information Moved upgrade test from BasicSetup to Changes10_9. Includes some simplifications that could be made because of the move. Patch file: derby-4115-7a-move_test.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1341059 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4116-02f8e49b
DERBY-4116 SYSCS_UTIL.SYSCS_UPDATE_STATISTICS should update the store estimated row count for the table git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@760497 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java", "hunks": [ { "added": [ "\t\t//initialize numRows to -1 so we can tell if we scanned an index.\t", "\t\tlong numRows = -1;\t\t", "\t\t" ], "header": "@@ -645,7 +645,9 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "" ] }, { "added": [ "\t\t\tnumRows = 0;" ], "header": "@@ -702,7 +704,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t\t\tlong numRows = 0;" ] }, { "added": [ "\t\t\t\tgsc.setEstimatedRowCount(numRows);" ], "header": "@@ -744,6 +746,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [] }, { "added": [ "\t\t\t\t * to sys.sysstatstics" ], "header": "@@ -754,7 +757,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t\t\t\t * to sys.systatstics." ] } ] } ]
derby-DERBY-4119-cf2105b3
DERBY-4119: Compress on a large table fails with IllegalArgumentException - Illegal Capacity Use long arithmetic to prevent overflow in intermediate results when increasing the capacity of NodeAllocator. Also make sure that the Node array does not exceed maxSize, and allow the sort to continue even if a larger array cannot be allocated. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@760422 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/sort/NodeAllocator.java", "hunks": [ { "added": [ " // Calculate the new length. The new array should be no longer", " // than maxSize. Use a long for the intermediate result to prevent", " // newLength from going negative due to integer overflow when", " // array.length is close to Integer.MAX_VALUE.", " int newLength = (int) Math.min(", " (long) array.length * GROWTH_MULTIPLIER,", " (long) maxSize);", "", " // nodes available. The allocation may fail if there's", " // not enough memory to allocate a new array, or if the", " // JVM doesn't support that big arrays (some JVMs have", " // a limit on the array length that is different from", " // Integer.MAX_VALUE --- DERBY-4119).", " Node[] newArray;", " try {", " newArray = new Node[newLength];", " } catch (OutOfMemoryError oome) {", " // Could not allocate a larger array, so tell the caller that", " // there are no nodes available.", " return null;", " }" ], "header": "@@ -75,12 +75,29 @@ final class NodeAllocator", "removed": [ "\t\t\t// nodes available.", "\t\t\tNode[] newArray = new Node[array.length * GROWTH_MULTIPLIER];", "\t\t\tif (newArray == null)", "\t\t\t\treturn null;" ] }, { "added": [ " if (percent > 0) { // cannot shrink", " // Calculate the new maximum size. Use long arithmetic so that", " // intermediate results don't overflow and make maxSize go", " // negative (DERBY-4119).", " maxSize = (int) Math.min(", " (long) maxSize * (100 + percent) / 100,", " (long) Integer.MAX_VALUE);", " }" ], "header": "@@ -152,8 +169,14 @@ final class NodeAllocator", "removed": [ "\t\tif (percent > 0)\t\t// cannot shrink", "\t\t\tmaxSize = maxSize * (100+percent)/100;" ] } ] } ]
derby-DERBY-412-e214c072
DERBY-412 - Connection toString should show type information and the meaning of the identifier that it prints - Patch by David Van Couvering(David.Vancouvering@Sun.COM) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@230523 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection.java", "hunks": [ { "added": [ " private String connString;" ], "header": "@@ -51,6 +51,7 @@ public class BrokeredConnection implements Connection", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java", "hunks": [ { "added": [ " private String connString;" ], "header": "@@ -145,7 +145,7 @@ public class EmbedConnection implements java.sql.Connection", "removed": [ " private String idString;" ] }, { "added": [ " * this connection. Include the same information that is", " * printed in the log for various trace and error messages." ], "header": "@@ -1919,7 +1919,8 @@ public class EmbedConnection implements java.sql.Connection", "removed": [ " * this connection" ] } ] }, { "file": "java/engine/org/apache/derby/jdbc/EmbedPooledConnection.java", "hunks": [ { "added": [ " /** the connection string */", " private String connString;" ], "header": "@@ -63,8 +63,8 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne", "removed": [ " /** String representation of id */", " private String idString;" ] } ] } ]
derby-DERBY-4122-5b9a6d9b
DERBY-4122: java/testing/org/apache/derbyTesting/unitTests/junit/ReaderToUTF8StreamTest.java. Added mark/reset functionality to ReaderToUTF8Stream. Made SQLClob use mark/reset to rewind the data stream when too many bytes are read as part of the stream header parsing. This happens when reading Clobs written with the pre-10.5 header format, either in soft or hard upgraded databases. Added a new error message. Added unit tests for mark/reset. Patch file: derby-4122-4c-classcast_fix_mark_reset.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@762384 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/ReaderToUTF8Stream.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.MessageId;" ], "header": "@@ -30,6 +30,7 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] }, { "added": [ "//@NotThreadSafe" ], "header": "@@ -39,6 +40,7 @@ import org.apache.derby.iapi.services.sanity.SanityManager;", "removed": [] }, { "added": [ " * Constant indicating that no mark is set in the stream, or that the read", " * ahead limit of the mark has been exceeded.", " private final static int MARK_UNSET_OR_EXCEEDED = -1;", " /**", " * Buffer to hold the data read from stream and converted to the modified", " * UTF-8 format. The initial size is 32 KB, but it may grow if the", " * {@linkplain #mark(int)} is invoked.", " */", " private byte[] buffer = new byte[32*1024];", " /** Stream mark, set through {@linkplain #mark(int)}. */", " private int mark = MARK_UNSET_OR_EXCEEDED;", " /** Read ahead limit for mark, set through {@linkplain #mark(int)}. */", " private int readAheadLimit;" ], "header": "@@ -50,13 +52,22 @@ public final class ReaderToUTF8Stream", "removed": [ " * Size of buffer to hold the data read from stream and converted to the", " * modified UTF-8 format.", " private final static int BUFSIZE = 32768;", " private byte[] buffer = new byte[BUFSIZE];" ] }, { "added": [ "\t\tint off = startingOffset;", " // In the case of a mark, the offset may be adjusted.", " // Do not change boff in the encoding loop. 
Before the encoding loop", " // starts, it shall point at the next byte the stream will deliver on", " // the next iteration of read or skip.", " boff = 0;", " // If we have a mark set, see if we have to expand the buffer, or if we", " // are going to read past the read ahead limit and can invalidate the", " // mark and discard the data currently in the buffer.", " if (mark >= 0) {", " // Add 6 bytes reserved for one 3 byte character encoding and the", " // 3 byte Derby EOF marker (see encoding loop further down).", " int spaceRequired = readAheadLimit + 6;", " if (mark + spaceRequired > buffer.length) {", " if (blen != -1) {", " // Calculate the new offset, as we may have to shift bytes", " // we have already delivered to the left.", " boff = off = blen - mark;", " }", " byte[] oldBuf = buffer;", " if (spaceRequired > buffer.length) {", " // We have to allocate a bigger buffer to save the bytes.", " buffer = new byte[spaceRequired];", " }", " System.arraycopy(oldBuf, mark, buffer, 0, off);", " mark = 0;", " } else if (blen != -1) {", " // Invalidate the mark.", " mark = MARK_UNSET_OR_EXCEEDED;", " }", " }", "" ], "header": "@@ -298,11 +309,42 @@ public final class ReaderToUTF8Stream", "removed": [ "\t\tint off = boff = startingOffset;" ] }, { "added": [], "header": "@@ -332,8 +374,6 @@ public final class ReaderToUTF8Stream", "removed": [ "\t\tboff = 0;", "" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLClob.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;", "" ], "header": "@@ -32,6 +32,8 @@ import org.apache.derby.iapi.services.io.StoredFormatIds;", "removed": [] }, { "added": [ " // Expect at least two header bytes.", " if (SanityManager.DEBUG) {", " SanityManager.ASSERT(read > 1,", " \"Too few header bytes: \" + read);", " }" ], "header": "@@ -331,6 +333,11 @@ public class SQLClob", "removed": [] }, { "added": [ " // Check here to see if the root cause is a container closed", " // exception. 
If so, this most likely means that the Clob was", " // accessed after a commit or rollback on the connection.", " Throwable rootCause = ioe;", " while (rootCause.getCause() != null) {", " rootCause = rootCause.getCause();", " }", " if (rootCause instanceof StandardException) {", " StandardException se = (StandardException)rootCause;", " if (se.getMessageId().equals(", " SQLState.DATA_CONTAINER_CLOSED)) {", " throw StandardException.newException(", " SQLState.BLOB_ACCESSED_AFTER_COMMIT, ioe);", " }", " }" ], "header": "@@ -346,6 +353,21 @@ public class SQLClob", "removed": [] }, { "added": [ " // For small values, just materialize the value.", " // NOTE: Using streams for the empty string (\"\") isn't supported", " // down this code path when in soft upgrade mode, because the code", " // reading the header bytes ends up reading zero bytes (i.e., it", " // doesn't get the header / EOF marker).", " if (vcl < 32*1024) {", " setValue(vc.getSubString(1, (int)vcl));", " } else {", " ReaderToUTF8Stream utfIn = new ReaderToUTF8Stream(", " vc.getCharacterStream(), (int) vcl, 0, TypeId.CLOB_NAME,", " getStreamHeaderGenerator());", " setValue(utfIn, (int) vcl);", " }" ], "header": "@@ -512,11 +534,19 @@ public class SQLClob", "removed": [ "", " ReaderToUTF8Stream utfIn = new ReaderToUTF8Stream(", " vc.getCharacterStream(), (int) vcl, 0, TypeId.CLOB_NAME,", " getStreamHeaderGenerator());", " setValue(utfIn, (int) vcl);" ] }, { "added": [ " final boolean markSet = stream.markSupported();", " if (markSet) {", " stream.mark(MAX_STREAM_HEADER_LENGTH);", " }", " // Expect at least two header bytes.", " if (SanityManager.DEBUG) {", " SanityManager.ASSERT(read > 1, \"Too few header bytes: \" + read);", " }", " // First see if we set a mark on the stream and can reset it.", " // If not, try using the Resetable interface.", " if (markSet) {", " // Stream is not a store Resetable one, use mark/reset", " // functionality instead.", " stream.reset();", " InputStreamUtil.skipFully(stream, 
hdrInfo.headerLength());", " } else if (stream instanceof Resetable) {", " // We have a store stream.", " rewindStream(hdrInfo.headerLength());", " }", " // Subtract the header length from the byte length if there is a byte", " // encoded in the header, otherwise the decode routine will try to read", " // too many bytes.", " int byteLength = 0; // zero is interpreted as unknown / unset", " if (hdrInfo.byteLength() != 0) {", " byteLength = hdrInfo.byteLength() - hdrInfo.headerLength();", " }", " super.readExternal(in, byteLength, hdrInfo.charLength());" ], "header": "@@ -658,17 +688,42 @@ public class SQLClob", "removed": [ " rewindStream(hdrInfo.headerLength());", " super.readExternal(in, hdrInfo.byteLength(), hdrInfo.charLength());" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/unitTests/junit/_Suite.java", "hunks": [ { "added": [ " suite.addTest(ReaderToUTF8StreamTest.suite());" ], "header": "@@ -57,6 +57,7 @@ public class _Suite extends BaseTestCase {", "removed": [] } ] } ]
derby-DERBY-4124-2ca6c802
DERBY-4124: Remove double-checked-locking from EmbeddedDataSource.findDriver This change ensures that EmbeddedDataSource.findDriver always synchronizes its access to the 'driver' member variable. The previous code performed unsynchronized access to the field, and then, depending on the results of the unsynchronized access, performed synchronized access. This double-checked-locking idiom is unsafe and incorrect, and this change replaces it with a simpler always-synchronized access. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@833888 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/jdbc/EmbeddedDataSource.java", "hunks": [ { "added": [ "\t\tsynchronized(this)", "\t\t\t// The driver has either never been booted, or it has been", "\t\t\t// shutdown by a 'jdbc:derby:;shutdown=true'", "\t\t\tif (driver == null || !driver.acceptsURL(url))", "\t\t\t\tnew org.apache.derby.jdbc.EmbeddedDriver();", "\t\t\t\t// If we know the driver, we loaded it. Otherwise only", "\t\t\t\t// work if DriverManager has already loaded it.", "\t\t\t\tAutoloadedDriver\tautoloadedDriver =", "\t\t\t\t\t(AutoloadedDriver) DriverManager.getDriver(url);", "\t\t\t\tdriver = (InternalDriver) autoloadedDriver.getDriverModule();", "\t\t\t\t// DriverManager will throw an exception if it cannot find the driver" ], "header": "@@ -490,25 +490,22 @@ public class EmbeddedDataSource extends ReferenceableDataSource implements", "removed": [ "\t\tif (driver == null || !driver.acceptsURL(url))", "\t\t\tsynchronized(this)", "\t\t\t\t// The driver has either never been booted, or it has been", "\t\t\t\t// shutdown by a 'jdbc:derby:;shutdown=true'", "\t\t\t\tif (driver == null || !driver.acceptsURL(url))", "\t\t\t\t{", "\t\t\t\t\tnew org.apache.derby.jdbc.EmbeddedDriver();", "\t\t\t\t\t// If we know the driver, we loaded it. Otherwise only", "\t\t\t\t\t// work if DriverManager has already loaded it.", "\t\t\t\t\tAutoloadedDriver\tautoloadedDriver =", "\t\t\t\t\t\t(AutoloadedDriver) DriverManager.getDriver(url);", "\t\t\t\t\tdriver = (InternalDriver) autoloadedDriver.getDriverModule();", "\t\t\t\t\t// DriverManager will throw an exception if it cannot find the driver", "\t\t\t\t}" ] } ] } ]
derby-DERBY-4125-20e986d5
DERBY-4125: The in-memory storage back end doesn't work on Windows. Changed the in-memory storage factory to rely much more on java.io.File to handle paths and names. The data store now stores all files and directories with normalized paths. Also fixed a bug in DataStore.deleteEntry for deletion of directories. Added two more tests; testListChildren and testCreateRoot. Patch file: derby-4125-1a-improved_path_handling.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@759176 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/io/VFMemoryStorageFactory.java", "hunks": [ { "added": [ " store.purge();" ], "header": "@@ -58,7 +58,7 @@ public class VFMemoryStorageFactory", "removed": [ " store.deleteAll(\"/\");" ] }, { "added": [ " !new File(databaseName).isAbsolute()) {" ], "header": "@@ -104,11 +104,8 @@ public class VFMemoryStorageFactory", "removed": [ " // TODO: Is using java.io.File the right thing to do?", " // Should we just set the canonical name equal to the", " // specified database name instead?", " !databaseName.startsWith(String.valueOf(getSeparator()))) {" ] }, { "added": [ " } else if (!new File(dir).isAbsolute()) {" ], "header": "@@ -307,7 +304,7 @@ public class VFMemoryStorageFactory", "removed": [ " } else if (dir.charAt(0) != getSeparator()) {" ] } ] }, { "file": "java/engine/org/apache/derby/impl/io/vfmem/DataStore.java", "hunks": [ { "added": [ "import java.io.File;", "" ], "header": "@@ -21,6 +21,8 @@", "removed": [] }, { "added": [ " * <p>", " * A newly created data store doesn't contain a single existing directory." 
], "header": "@@ -31,6 +33,8 @@ import org.apache.derby.io.StorageFile;", "removed": [] }, { "added": [], "header": "@@ -64,9 +68,6 @@ public final class DataStore {", "removed": [ " // Create the absolute root.", " createEntry(String.valueOf(SEP), true);", "" ] }, { "added": [ " // Normalize the path.", " final String nPath = new File(iPath).getPath();", " if (files.containsKey(nPath)) {", " String[] parents = getParentList(nPath);", " for (int i=parents.length -1; i >= 0; i--) {", " DataStoreEntry entry = (DataStoreEntry)files.get(parents[i]);", " DataStoreEntry newEntry = new DataStoreEntry(nPath, isDir);", " files.put(nPath, newEntry);" ], "header": "@@ -91,23 +92,24 @@ public final class DataStore {", "removed": [ " if (files.containsKey(iPath)) {", " String parent = PathUtil.getParent(iPath);", " while (parent != null) {", " DataStoreEntry entry = (DataStoreEntry)files.get(parent);", " parent = PathUtil.getParent(parent);", " DataStoreEntry newEntry = new DataStoreEntry(iPath, isDir);", " files.put(iPath, newEntry);" ] }, { "added": [ " final String nPath = new File(path).getPath();", " // Iterate through the list and create the missing parents.", " String[] parents = getParentList(nPath);", " for (int i=parents.length -1; i >= 0; i--) {", " String subPath = parents[i];", " // Fail if one of the parents is a regular file." 
], "header": "@@ -119,25 +121,19 @@ public final class DataStore {", "removed": [ " if (path.charAt(path.length() -1) == SEP) {", " path = path.substring(0, path.length() -1);", " }", " // If there is no path separator, only one entry will be created.", " if (path.indexOf(SEP) == -1) {", " return true;", " }", " int index = path.indexOf(SEP, 1); // The root always exists", "", " while (index > 0) {", " String subPath = path.substring(0, index);", " index = path.indexOf(SEP, index +1);" ] }, { "added": [ " final String nPath = new File(iPath).getPath();", " entry = (DataStoreEntry)files.remove(nPath);", " String[] children = listChildren(nPath);", " if (children.length > 0) {", " files.put(nPath, entry);", " entry.release();" ], "header": "@@ -153,21 +149,20 @@ public final class DataStore {", "removed": [ " entry = (DataStoreEntry)files.remove(iPath);", " String[] children = listChildren(iPath);", " if (children == null || children.length == 0){", " entry.release();", " files.put(iPath, entry);", " } else {", " entry.release();" ] }, { "added": [ " // Use java.io.File to normalize the path.", " return (DataStoreEntry)files.get(new File(iPath).getPath());" ], "header": "@@ -182,7 +177,8 @@ public final class DataStore {", "removed": [ " return (DataStoreEntry)files.get(iPath);" ] }, { "added": [ " final String nPath = new File(iPath).getPath();", " DataStoreEntry entry = (DataStoreEntry)files.remove(nPath);", " return _deleteAll(nPath);" ], "header": "@@ -194,14 +190,15 @@ public final class DataStore {", "removed": [ " DataStoreEntry entry = (DataStoreEntry)files.remove(iPath);", " return _deleteAll(iPath);" ] }, { "added": [ " String nPath = new File(iPath).getPath();", " if (nPath.charAt(nPath.length() -1) != SEP) {", " nPath += SEP;" ], "header": "@@ -222,9 +219,10 @@ public final class DataStore {", "removed": [ " if (iPath.charAt(iPath.length() -1) != SEP) {", " iPath += SEP;" ] }, { "added": [ " if (candidate.startsWith(nPath)) {", " 
children.add(candidate.substring(nPath.length()));" ], "header": "@@ -232,8 +230,8 @@ public final class DataStore {", "removed": [ " if (candidate.startsWith(iPath)) {", " children.add(candidate.substring(iPath.length()));" ] }, { "added": [ " final String currentPath = new File(currentFile.getPath()).getPath();", " final String newPath = new File(newFile.getPath()).getPath();", " if (files.containsKey(newPath)) {", " files.remove(currentPath);", " files.put(newPath, current);", " /**", " * Purges the database and releases all files associated with it.", " */", " public void purge() {", " synchronized (LOCK) {", " Iterator fileIter = files.values().iterator();", " while (fileIter.hasNext()) {", " ((DataStoreEntry)fileIter.next()).release();", " }", " // Clear all the mappings.", " files.clear();", " }", " }", "", " * @param prefixPath the normalized root path to start deleting from", " //@GuardedBy(\"LOCK\")", " // Make sure the search path ends with the separator.", " if (prefixPath.charAt(prefixPath.length() -1) != SEP) {", " prefixPath += SEP;", " }" ], "header": "@@ -249,30 +247,51 @@ public final class DataStore {", "removed": [ " if (files.containsKey(newFile.getPath())) {", " files.remove(currentFile.getPath());", " files.put(newFile.getPath(), current);", " * @param prefixPath the root path to start deleting from" ] }, { "added": [ " entry.release();" ], "header": "@@ -289,9 +308,7 @@ public final class DataStore {", "removed": [ " if (!entry.isDirectory()) {", " entry.release();", " }" ] } ] } ]
derby-DERBY-4126-4d0fc78c
DERBY-4126: Find table functions even when they are in jar files in the database. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@759360 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromVTI.java", "hunks": [ { "added": [ " Class vtiClass = lookupClass( className );", " Class vtiCostingClass = lookupClass( VTICosting.class.getName() );", " try {", " if ( !vtiCostingClass.isAssignableFrom( vtiClass ) ) { return false; }" ], "header": "@@ -1632,12 +1632,11 @@ public class FromVTI extends FromTable implements VTIEnvironment", "removed": [ " Class vtiClass = null;", " ", " try {", " vtiClass = Class.forName( className );", " if ( !VTICosting.class.isAssignableFrom( vtiClass ) ) { return false; }" ] }, { "added": [ " String className = methodCall.getJavaClassName();", " Class vtiClass = lookupClass( className );", " " ], "header": "@@ -1669,9 +1668,10 @@ public class FromVTI extends FromTable implements VTIEnvironment", "removed": [ " String className = methodCall.getJavaClassName();", " Class vtiClass = Class.forName( className );" ] } ] } ]
derby-DERBY-4127-1cd97e82
DERBY-4127: Port 764471 from 10.5 branch to main. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@764481 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/catalog/DD_Version.java", "hunks": [ { "added": [ "import java.util.HashSet;" ], "header": "@@ -61,6 +61,7 @@ import java.io.ObjectInput;", "removed": [] }, { "added": [ " HashSet newlyCreatedRoutines = new HashSet();", " " ], "header": "@@ -364,6 +365,8 @@ public\tclass DD_Version implements\tFormatable", "removed": [] }, { "added": [ " tc,", " newlyCreatedRoutines,", " tc,", " newlyCreatedRoutines," ], "header": "@@ -391,23 +394,18 @@ public\tclass DD_Version implements\tFormatable", "removed": [ " tc, ", " if (fromMajorVersionNumber <= DataDictionary.DD_VERSION_DERBY_10_4)", " {", " // On ugrade from versions before 10.5, create system procedures", " // added in 10.5.", " bootingDictionary.create_10_5_system_procedures(tc);", " }", "", " tc, " ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ "import java.util.HashSet;" ], "header": "@@ -141,6 +141,7 @@ import java.util.Calendar;", "removed": [] }, { "added": [ "", " HashSet newlyCreatedRoutines = new HashSet();", " ", "\t\t\t\tcreate_SYSIBM_procedures(bootingTC, newlyCreatedRoutines );", " create_SYSCS_procedures(bootingTC, newlyCreatedRoutines );", " // now grant execute permission on some of these routines", " grantPublicAccessToSystemRoutines( newlyCreatedRoutines, bootingTC, authorizationDatabaseOwner );" ], "header": "@@ -710,15 +711,19 @@ public final class\tDataDictionaryImpl", "removed": [ "\t\t\t", "\t\t\t\tcreate_SYSIBM_procedures(bootingTC);", " create_SYSCS_procedures(bootingTC);" ] }, { "added": [ "" ], "header": "@@ -743,9 +748,7 @@ public final class\tDataDictionaryImpl", "removed": [ "\t\t\t\tSchemaDescriptor sd = locateSchemaRow(SchemaDescriptor.IBM_SYSTEM_SCHEMA_NAME,", "\t\t\t\t\t\t\t\t bootingTC);", "\t\t\t\tauthorizationDatabaseOwner = sd.getAuthorizationId();" ] }, { "added": [ "", " // now reset our understanding of who owns the database", " resetDatabaseOwner( tc );" ], 
"header": "@@ -6358,6 +6361,9 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " resetDatabaseOwner( tc );", " ", "\t/**", "\t *\tReset the database owner according to what is stored in the catalogs.", " * This can change at upgrade time so we have factored this logic into", " * a separately callable method.", "\t *", "\t *", "\t *\t@param\ttc\t\tTransactionController", " *", "\t * @exception StandardException\t\tThrown on error", "\t */", " public void resetDatabaseOwner( TransactionController tc )", " throws StandardException", " {", " SchemaDescriptor sd = locateSchemaRow", " (SchemaDescriptor.IBM_SYSTEM_SCHEMA_NAME, tc );", " authorizationDatabaseOwner = sd.getAuthorizationId();", " }", " " ], "header": "@@ -6899,9 +6905,29 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " * @param newlyCreatedRoutines evolving set of routines, some of which may need permissions later on" ], "header": "@@ -9390,6 +9416,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " HashSet newlyCreatedRoutines," ], "header": "@@ -9409,6 +9436,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines.add( routine_name );", "" ], "header": "@@ -9477,6 +9505,8 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " * @param newlyCreatedRoutines evolving set of routines, some of which may need permissions later on" ], "header": "@@ -9507,6 +9537,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " HashSet newlyCreatedRoutines,", " return_type, newlyCreatedRoutines, tc, \"org.apache.derby.catalog.SystemProcedures\");" ], "header": "@@ -9523,13 +9554,14 @@ public final class\tDataDictionaryImpl", "removed": [ " return_type, tc, \"org.apache.derby.catalog.SystemProcedures\");" ] }, { "added": [ " * @param newlyCreatedRoutines evolving set of routines which may need to be given permissions later on", " TransactionController tc, HashSet 
newlyCreatedRoutines )", "" ], "header": "@@ -9543,16 +9575,18 @@ public final class\tDataDictionaryImpl", "removed": [ " TransactionController tc)" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9588,6 +9622,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9615,9 +9650,8 @@ public final class\tDataDictionaryImpl", "removed": [ "", "\t\t\tcreateRoutinePermPublicDescriptor(routine_uuid, tc);" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9632,6 +9666,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9647,6 +9682,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9662,6 +9698,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9686,6 +9723,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9712,6 +9750,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9733,6 +9772,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9754,9 +9794,8 @@ public final class\tDataDictionaryImpl", "removed": [ "", "\t\t\tcreateRoutinePermPublicDescriptor(routine_uuid, tc);" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9777,9 +9816,8 @@ public final class\tDataDictionaryImpl", "removed": [ "", "\t\t\tcreateRoutinePermPublicDescriptor(routine_uuid, tc);" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9810,6 +9848,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9834,6 +9873,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9851,6 
+9891,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [], "header": "@@ -9859,8 +9900,6 @@ public final class\tDataDictionaryImpl", "removed": [ "", "\t\t\tcreateRoutinePermPublicDescriptor(routine_uuid, tc);" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9893,6 +9932,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9916,6 +9956,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9938,6 +9979,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -9976,6 +10018,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10012,6 +10055,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10053,6 +10097,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10097,6 +10142,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines,", " create_10_1_system_procedures(tc, newlyCreatedRoutines, sysUtilUUID);", " create_10_2_system_procedures(tc, newlyCreatedRoutines, sysUtilUUID);", " create_10_3_system_procedures(tc, newlyCreatedRoutines );", " create_10_5_system_procedures(tc, newlyCreatedRoutines );" ], "header": "@@ -10132,17 +10178,18 @@ public final class\tDataDictionaryImpl", "removed": [ " create_10_1_system_procedures(tc, sysUtilUUID);", " create_10_2_system_procedures(tc, sysUtilUUID);", " create_10_3_system_procedures(tc);", " create_10_5_system_procedures(tc);" ] }, { "added": [ " * @param newlyCreatedRoutines evolving set of routines which we're adding (some may need permissions later on)", " TransactionController tc, HashSet newlyCreatedRoutines )" ], "header": "@@ -10153,13 +10200,14 @@ public 
final class\tDataDictionaryImpl", "removed": [ " TransactionController tc)" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10223,6 +10271,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10253,6 +10302,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10283,6 +10333,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10313,6 +10364,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10345,6 +10397,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10378,6 +10431,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10410,6 +10464,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10442,6 +10497,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10474,6 +10530,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10511,6 +10568,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10548,6 +10606,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10574,6 +10633,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10609,6 +10669,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10624,6 +10685,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " * done for some routines in 
SYSCS_UTIL schema. We grant access to routines", " * which we have just added. Doing it this way lets us declare these", " * routines in one place and re-use this logic during database creation and", " * during upgrade.", " public void grantPublicAccessToSystemRoutines(HashSet newlyCreatedRoutines, TransactionController tc, ", "", " String routineName = sysUtilProceduresWithPublicAccess[i];", " if ( !newlyCreatedRoutines.contains( routineName ) ) { continue; }", " ", " \t\t\t\troutineName, ", " ", " String routineName = sysUtilFunctionsWithPublicAccess[i];", " if ( !newlyCreatedRoutines.contains( routineName ) ) { continue; }", " ", " \t\t\t\troutineName, " ], "header": "@@ -10631,28 +10693,39 @@ public final class\tDataDictionaryImpl", "removed": [ " * done for some routines in SYSCS_UTIL schema. ", " public void grantPublicAccessToSystemRoutines(TransactionController tc, ", " \t\t\t\tsysUtilProceduresWithPublicAccess[i], ", " \t\t\t\tsysUtilFunctionsWithPublicAccess[i], " ] }, { "added": [ " //", " // When upgrading from 10.1, it can happen that we haven't yet created", " // all public procedures. We forgive that possibility here and just return.", " //", " if ( ad == null ) { return; }" ], "header": "@@ -10680,34 +10753,17 @@ public final class\tDataDictionaryImpl", "removed": [ " \t", " \tif (SanityManager.DEBUG) {", "\t\t\tSanityManager.ASSERT((ad != null), \"Failed to get AliasDescriptor\" ", "\t\t\t\t\t\t\t\t\t\t\t+ \" of the routine\");", " \t}", "\t/**", "\t * Create RoutinePermDescriptor to grant access to PUBLIC for", "\t * this system routine. 
Currently only SYSUTIL routines need access", "\t * granted to execute them when a database is created/upgraded.", "\t *", "\t * @param routineUUID uuid of the routine", "\t * @param tc\t\t\tTransactionController to use", "\t *", "\t * @exception StandardException Standard exception policy.", "\t */", "\tvoid createRoutinePermPublicDescriptor(", "\tUUID routineUUID,", "\tTransactionController tc) throws StandardException", "\t{", "\t\tcreateRoutinePermPublicDescriptor(routineUUID, tc, authorizationDatabaseOwner);", "\t}", "" ] }, { "added": [ " * @param tc booting transaction", " * @param newlyCreatedRoutines set of routines we are creating (used to add permissions later on)", " HashSet newlyCreatedRoutines," ], "header": "@@ -10740,12 +10796,15 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10786,9 +10845,8 @@ public final class\tDataDictionaryImpl", "removed": [ "", "\t\t\tcreateRoutinePermPublicDescriptor(routine_uuid, tc);" ] }, { "added": [ " * @param tc booting transaction", " * @param newlyCreatedRoutines set of routines we are creating (used to add permissions later on)", " HashSet newlyCreatedRoutines," ], "header": "@@ -10800,12 +10858,15 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10834,6 +10895,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10864,6 +10926,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10895,6 +10958,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10928,6 +10992,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " * @param newlyCreatedRoutines set of routines we are creating (used to add permissions later on)", " TransactionController tc,", " HashSet 
newlyCreatedRoutines )" ], "header": "@@ -10939,11 +11004,13 @@ public final class\tDataDictionaryImpl", "removed": [ " TransactionController tc)" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10963,6 +11030,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -10982,6 +11050,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11008,6 +11077,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11033,6 +11103,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11053,6 +11124,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11079,6 +11151,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11105,6 +11178,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11128,6 +11202,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11149,6 +11224,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11168,6 +11244,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11194,6 +11271,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11219,6 +11297,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11241,6 +11320,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11267,6 +11347,7 @@ public final 
class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11293,6 +11374,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11316,6 +11398,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " * @param newlyCreatedRoutines set of routines we are creating (used to add permissions later on)", " void create_10_5_system_procedures(TransactionController tc, HashSet newlyCreatedRoutines )" ], "header": "@@ -11325,9 +11408,10 @@ public final class\tDataDictionaryImpl", "removed": [ " void create_10_5_system_procedures(TransactionController tc)" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11356,9 +11440,8 @@ public final class\tDataDictionaryImpl", "removed": [ "", " createRoutinePermPublicDescriptor(routine_uuid, tc);" ] }, { "added": [ " * @param newlyCreatedRoutines set of routines we are creating (used to add permissions later on)", " void create_10_3_system_procedures(TransactionController tc, HashSet newlyCreatedRoutines ) ", " create_10_3_system_procedures_SYSCS_UTIL(tc, newlyCreatedRoutines );", " create_10_3_system_procedures_SYSIBM(tc, newlyCreatedRoutines );" ], "header": "@@ -11366,14 +11449,15 @@ public final class\tDataDictionaryImpl", "removed": [ " void create_10_3_system_procedures(TransactionController tc) ", " create_10_3_system_procedures_SYSCS_UTIL(tc);", " create_10_3_system_procedures_SYSIBM(tc);" ] }, { "added": [ " * @param newlyCreatedRoutines set of routines we are creating (used to add permissions later on)", " void create_10_3_system_procedures_SYSCS_UTIL( TransactionController tc, HashSet newlyCreatedRoutines )" ], "header": "@@ -11384,10 +11468,10 @@ public final class\tDataDictionaryImpl", "removed": [ " void create_10_3_system_procedures_SYSCS_UTIL(", " TransactionController tc)" ] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11429,6 +11513,7 @@ public final 
class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11470,6 +11555,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11509,6 +11595,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11555,6 +11642,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11571,6 +11659,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11589,6 +11678,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11606,6 +11696,7 @@ public final class\tDataDictionaryImpl", "removed": [] }, { "added": [ " newlyCreatedRoutines," ], "header": "@@ -11621,6 +11712,7 @@ public final class\tDataDictionaryImpl", "removed": [] } ] } ]
derby-DERBY-4137-849dab23
DERBY-4137: OOM issue using XA with timeouts Reduce the memory footprint when executing successful XA transactions that have a timeout set by nulling out the reference to the XATransactionState object in the timer task. The timer task will stay around in the timer queue until reaching the scheduled time - even if the task is canceled. Added a regression test (run as part of 'ant junit-lowmem'). Patch file: derby-4137-2a-reduce_memory_footprint.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1136363 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/jdbc/XATransactionState.java", "hunks": [ { "added": [ " private static class CancelXATransactionTask extends TimerTask {", " private XATransactionState xaState; ", "", " /**", " * Creates the cancellation task to be passed to a timer.", " *", " * @param xaState the XA state object for the transaction to cancel", " */", " public CancelXATransactionTask(XATransactionState xaState) {", " this.xaState = xaState;", " }", " ", " public boolean cancel() {", " // nullify reference to reduce memory footprint of canceled tasks", " xaState = null;", " return super.cancel();", " xaState.cancel(MessageId.CONN_XA_TRANSACTION_TIMED_OUT);" ], "header": "@@ -90,17 +90,29 @@ final class XATransactionState extends ContextImpl {", "removed": [ " private class CancelXATransactionTask extends TimerTask {", " /** Creates the cancelation object to be passed to a timer. */", " public CancelXATransactionTask() {", " XATransactionState.this.timeoutTask = this;", " XATransactionState.this.cancel(MessageId.CONN_XA_TRANSACTION_TIMED_OUT);" ] }, { "added": [ " timeoutTask = new CancelXATransactionTask(this);", " timer.schedule(timeoutTask, timeoutMillis);" ], "header": "@@ -313,10 +325,10 @@ final class XATransactionState extends ContextImpl {", "removed": [ " TimerTask cancelTask = new CancelXATransactionTask();", " timer.schedule(cancelTask, timeoutMillis);" ] } ] } ]
derby-DERBY-4141-31ba8d0a
DERBY-4141 XAExceptions thrown by Derby can have errorCode 0 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@764495 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/net/NetXAResource.java", "hunks": [ { "added": [ " rc = getSqlExceptionXAErrorCode(sqle);" ], "header": "@@ -167,7 +167,7 @@ public class NetXAResource implements XAResource {", "removed": [ " rc = XAException.XAER_RMERR;" ] }, { "added": [ " /**", " * Get XAException.errorCode from SqlException", " * For disconnect exception, return XAER_RMFAIL", " * For other exceptions return XAER_RMERR", " * ", " * For server side SQLExceptions during ", " * XA operations the errorCode has already been determined", " * and wrapped in an XAException for return to the client.", " * see EmbedXAResource.wrapInXAException", " * ", " * @param sqle SqlException to evaluate.", " * @return XAException.XAER_RMFAIL for disconnect exception,", " * XAException.XAER_RMERR for other exceptions.", " */", " private int getSqlExceptionXAErrorCode(SqlException sqle) { ", " int seErrorCode = sqle.getErrorCode();", " return (seErrorCode == 40000 ? XAException.XAER_RMFAIL : XAException.XAER_RMERR);", " }", "" ], "header": "@@ -178,6 +178,25 @@ public class NetXAResource implements XAResource {", "removed": [] }, { "added": [ " rc = getSqlExceptionXAErrorCode(sqle);" ], "header": "@@ -228,7 +247,7 @@ public class NetXAResource implements XAResource {", "removed": [ " rc = XAException.XAER_RMERR;" ] }, { "added": [ " throwXAException(getSqlExceptionXAErrorCode(sqle));" ], "header": "@@ -290,7 +309,7 @@ public class NetXAResource implements XAResource {", "removed": [ " throwXAException(XAException.XAER_RMERR);" ] }, { "added": [ " rc = getSqlExceptionXAErrorCode(sqle);" ], "header": "@@ -371,7 +390,7 @@ public class NetXAResource implements XAResource {", "removed": [ " rc = XAException.XAER_RMERR;" ] }, { "added": [ " rc = getSqlExceptionXAErrorCode(sqle);" ], "header": "@@ -442,7 +461,7 @@ public class NetXAResource implements XAResource {", "removed": [ " rc = XAException.XAER_RMERR;" ] }, { "added": [ " rc = getSqlExceptionXAErrorCode(sqle);" ], 
"header": "@@ -495,7 +514,7 @@ public class NetXAResource implements XAResource {", "removed": [ " rc = XAException.XAER_RMERR;" ] }, { "added": [ " \trc = getSqlExceptionXAErrorCode(sqle);" ], "header": "@@ -569,7 +588,7 @@ public class NetXAResource implements XAResource {", "removed": [ " \trc = XAException.XAER_RMERR;" ] } ] }, { "file": "java/engine/org/apache/derby/jdbc/EmbedXAResource.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.error.ExceptionSeverity;" ], "header": "@@ -29,6 +29,7 @@ import javax.transaction.xa.XAResource;", "removed": [] } ] } ]
derby-DERBY-4145-7168d7da
DERBY-4145: Look for illegal references in generation clauses before binding the clauses. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@762520 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/TableElementList.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;" ], "header": "@@ -36,6 +36,7 @@ import org.apache.derby.iapi.types.TypeId;", "removed": [] }, { "added": [ "import java.util.ArrayList;", "import java.util.HashSet;" ], "header": "@@ -57,7 +58,9 @@ import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;", "removed": [] }, { "added": [ "\t * @param baseTable Table descriptor if this is an ALTER TABLE statement.", "\tvoid bindAndValidateGenerationClauses( SchemaDescriptor sd, FromList fromList, FormatableBitSet generatedColumns, TableDescriptor baseTable )" ], "header": "@@ -741,10 +744,11 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ "\tvoid bindAndValidateGenerationClauses( SchemaDescriptor sd, FromList fromList, FormatableBitSet generatedColumns )" ] }, { "added": [ " // complain if a generation clause references another generated column", " findIllegalGenerationReferences( fromList, baseTable );", "" ], "header": "@@ -753,6 +757,9 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [] }, { "added": [ " ", "\t}", "", "\t/**", "\t * Complain if a generation clause references other generated columns. 
This", "\t * is required by the SQL Standard, part 2, section 4.14.8.", "\t *", "\t * @param fromList\t\tThe FromList in question.", "\t * @param baseTable Table descriptor if this is an ALTER TABLE statement.", "\t * @exception StandardException\t\tThrown on error", "\t */", "\tvoid findIllegalGenerationReferences( FromList fromList, TableDescriptor baseTable )", "\t\tthrows StandardException", "\t{", " ArrayList generatedColumns = new ArrayList();", " HashSet names = new HashSet();", "\t\tint size = size();", "", " // add in existing generated columns if this is an ALTER TABLE statement", " if ( baseTable != null )", " {", " ColumnDescriptorList cdl = baseTable.getGeneratedColumns();", " int count = cdl.size();", " for ( int i = 0; i < count; i++ )", " {", " names.add( cdl.elementAt( i ).getColumnName() );", " }", " }", " ", " // find all of the generated columns", "\t\t\tTableElementNode element = (TableElementNode) elementAt(index);" ], "header": "@@ -892,15 +899,40 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ " //", " // Now verify that none of the generated columns reference other", " // generated columns.", " //", " ResultColumnList rcl = table.getResultColumns();", "\t\t\tTableElementNode element = (TableElementNode) elementAt(index);" ] } ] } ]
derby-DERBY-4146-a8c09662
DERBY-4146: Bind a column descriptor to a generated result column for INSERTs without target lists. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@763230 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4149-58355bbd
DERBY-4149 test failure in jdbcapi.InternationalConnectSimpleDSTest fixture testSimpleDSConnect on IBM iseries - Database '?' not found. Fix the test so that it cleans up the database directory even if connection fails. This does not correct the actual test failure, just the cleanup. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@822027 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4149-6c42ee6d
DERBY-4149; test failure in jdbcapi.InternationalConnectSimpleDSTest - skipping the fixtures in InternationalConnectSimpleDSTest and InternationalConnectTest on iseries. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1058707 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4149-cd5b86ca
DERBY-4149; test failure in jdbcapi.InternationalConnectSimpleDSTest fixture testSimpleDSConnect on IBM iseries - Database '?' not found. reinstating the skipped test cases after fix of jvm on iseries git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1245283 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-415-32803bce
DERBY-415 : sysinfo with -cp client should not complain about DB2 Driver This change modifies the behavior of the "-cp client" argument to the sysinfo tool. Formerly, this argument checked for both the DerbyNetClient and the DB2 JCC driver, and complained if both drivers were not present. Increasingly, users of Derby are using just the DerbyNetClient, and the use of the JCC driver is less common, so it makes sense that "-cp client" should focus on the DerbyNetClient, and the DB2 JCC driver can be treated separately. So, "-cp client" now only checks for the DerbyNetClient, and a new argument "-cp db2driver" is added to check for the DB2 JCC driver. The new behavior is: java sysinfo -cp: checks all the various components of the classpath java sysinfo -cp client: just checks the network client java sysinfo -cp db2driver: just checks the JCC driver git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@431741 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/tools/org/apache/derby/impl/tools/sysinfo/Main.java", "hunks": [ { "added": [ "\tprivate static final String DB2DRIVER = \"db2driver\";", "\tprivate static final String USAGESTRINGPARTA = MAINUSAGESTRING + \" [ [ \" + EMBEDDED + \" ][ \" + NET + \" ][ \" + CLIENT + \"] [ \" + DB2DRIVER + \" ] [ \" + TOOLS + \" ] [ \";" ], "header": "@@ -454,12 +454,13 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ "\tprivate static final String USAGESTRINGPARTA = MAINUSAGESTRING + \" [ [ \" + EMBEDDED + \" ][ \" + NET + \" ][ \" + CLIENT + \"] [ \" + TOOLS + \" ] [ \";" ] }, { "added": [ "\t\t tryDB2DriverClasspath(successes, failures);" ], "header": "@@ -507,6 +508,7 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [] }, { "added": [ "\t\tif (argumentsContain(args,DB2DRIVER)) {", "\t\t\ttryDB2DriverClasspath(successes, failures);", "\t\t\tseenArg =true;", "\t\t}" ], "header": "@@ -542,6 +544,10 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [] }, { "added": [ " private static void tryDB2DriverClasspath(StringBuffer successes,", " StringBuffer failures)", " {", " tryMyClasspath(\"com.ibm.db2.jcc.DB2Driver\",", " Main.getTextMessage(\"SIF08.L\", \"db2jcc.jar\"),", " successes, failures);", " }" ], "header": "@@ -584,9 +590,15 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ "\t\ttryMyClasspath(\"com.ibm.db2.jcc.DB2Driver\", Main.getTextMessage(\"SIF08.L\", \"db2jcc.jar\"), successes, failures);" ] } ] } ]
derby-DERBY-4154-7feaf9c0
DERBY-4154 DboPowersTest should not hard upgrade the system/wombat database The patch makes the hard upgrade fixture use another database than "system/wombat", by wrapping the offending test cases in a singleUseDatabaseDecorator. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@788369 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4157-6409b407
DERBY-4157: Add a test for the integrity of metadata along all upgrade trajectories--the test must be run standalone and is not wired into our regression test suite. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@785826 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4157-e65365e6
DERBY-4157 (partial) Create a test to verify that virgin metadata is identical to hard-upgraded metadata Contributed by Rick Hillegas Attaching derby-4157-01-aa-refactor.diff. This patch is a first step toward writing the new test. This patch factors out some of the existing upgrade test machinery so that the new test can re-use that machinery: 1) logic to discover old versions 2) logic to create class loaders out of those old versions 3) logic to use those class loaders Touches the following files: A java/testing/org/apache/derbyTesting/functionTests/tests/upgradeTests/OldVersions.java A java/testing/org/apache/derbyTesting/functionTests/tests/upgradeTests/UpgradeClassLoader.java New classes to hold the logic which has been factored out. OldVersions used to be an inner class of _Suite. UpgradeClassLoader consists of code extracted from UpgradeRun and PhaseChanger. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@764912 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4160-fed9c08f
DERBY-4160: getMetaData().getIndexInfo crashes with "ERROR X0Y68: Column 'PARAM1' already exists." Use a shared code path for adding parameters to SYS.SYSCOLUMNS on the first compilation and subsequent compilations of a meta-data query. Previously, the first compilation took a different code path, but that caused problems if two threads compiled a meta-data query at the same time, and both threads thought they were first. Set a savepoint before attempting to write a stored prepared statement to the system tables in a nested transaction, and roll back to the savepoint if an error happens. This prevents partially stored prepared statements from lying around in the system tables after an error. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1570488 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/SPSDescriptor.java", "hunks": [ { "added": [ " private final boolean initiallyCompilable;" ], "header": "@@ -137,7 +137,7 @@ public class SPSDescriptor extends UniqueSQLObjectDescriptor", "removed": [ "\tprivate\tboolean\t\t\t\t\tinitiallyCompilable;" ] }, { "added": [ " final String savepoint = lcc.getUniqueSavepointName();", "" ], "header": "@@ -696,6 +696,8 @@ public class SPSDescriptor extends UniqueSQLObjectDescriptor", "removed": [] }, { "added": [ "", " // Set a savepoint so that the work in the nested", " // transaction can be rolled back on error without", " // aborting the parent transaction.", " nestedTC.setSavePoint(savepoint, null);" ], "header": "@@ -719,6 +721,11 @@ public class SPSDescriptor extends UniqueSQLObjectDescriptor", "removed": [] }, { "added": [], "header": "@@ -727,12 +734,6 @@ public class SPSDescriptor extends UniqueSQLObjectDescriptor", "removed": [ "\t\t\t\t// DERBY-2584: If the first attempt to compile the query fails,", "\t\t\t\t// we need to reset initiallyCompilable to make sure the", "\t\t\t\t// prepared plan is fully stored to disk. Save the initial", "\t\t\t\t// value here.", "\t\t\t\tfinal boolean compilable = initiallyCompilable;", "" ] }, { "added": [ " if (nestedTC != null)", " {", " // Roll back to savepoint to undo any work done by", " // the nested transaction. 
We cannot abort the nested", " // transaction in order to achieve the same, since", " // that would also abort the parent transaction.", " nestedTC.rollbackToSavePoint(savepoint, false, null);", " }", "", " if (nestedTC != null && se.isLockTimeout())" ], "header": "@@ -740,7 +741,16 @@ public class SPSDescriptor extends UniqueSQLObjectDescriptor", "removed": [ "\t\t\t\t\tif (se.isLockTimeout())" ] }, { "added": [ " nestedTC.commit();", " nestedTC.destroy();", " nestedTC = null;" ], "header": "@@ -748,18 +758,14 @@ public class SPSDescriptor extends UniqueSQLObjectDescriptor", "removed": [ "\t\t\t\t\t\tif (nestedTC != null)", "\t\t\t\t\t\t{", " nestedTC.commit();", " nestedTC.destroy();", " nestedTC = null;", "\t\t\t\t\t\t}", "\t\t\t\t\t\tinitiallyCompilable = compilable;" ] }, { "added": [], "header": "@@ -1106,24 +1112,6 @@ public class SPSDescriptor extends UniqueSQLObjectDescriptor", "removed": [ "\t\tboolean\t\t\t\t\tupdateSYSCOLUMNS, recompile;", "\t\tboolean firstCompilation = false;", "\t\tif (mode == RECOMPILE)", "\t\t{", "\t\t\trecompile = true;", "\t\t\tupdateSYSCOLUMNS = true;", "\t\t\tif(!initiallyCompilable)", "\t\t\t{", "\t\t\t\tfirstCompilation = true;", "\t\t\t\tinitiallyCompilable = true;", "\t\t\t}", "\t\t}", "\t\telse", "\t\t{", "\t\t\trecompile = false;", "\t\t\tupdateSYSCOLUMNS = false;", "\t\t}", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ " @Override", " boolean recompile)" ], "header": "@@ -4405,19 +4405,14 @@ public final class\tDataDictionaryImpl", "removed": [ "\t * @param updateParamDescriptors If true, will update the", "\t *\t\t\t\t\t\tparameter descriptors in SYS.SYSCOLUMNS.", "\t * @param firstCompilation true, if Statement is getting compiled for first", "\t * time and SPS was created with NOCOMPILE option.", "\t\t\tboolean recompile,", "\t\t\tboolean\t\t\t\t\tupdateParamDescriptors,", "\t\t\tboolean firstCompilation)" ] }, { "added": [ " updCols = new int[] 
{", " SYSSTATEMENTSRowFactory.SYSSTATEMENTS_VALID,", " SYSSTATEMENTSRowFactory.SYSSTATEMENTS_TEXT,", " SYSSTATEMENTSRowFactory.SYSSTATEMENTS_LASTCOMPILED,", " SYSSTATEMENTSRowFactory.SYSSTATEMENTS_USINGTEXT,", " SYSSTATEMENTSRowFactory.SYSSTATEMENTS_CONSTANTSTATE,", " };" ], "header": "@@ -4428,23 +4423,13 @@ public final class\tDataDictionaryImpl", "removed": [ "\t\t\tif(firstCompilation)", "\t\t\t{", "\t\t\t\tupdCols = new int[] {SYSSTATEMENTSRowFactory.SYSSTATEMENTS_VALID,", "\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_TEXT,", "\t\t\t\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_LASTCOMPILED,", "\t\t\t\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_USINGTEXT,", "\t\t\t\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_CONSTANTSTATE,", "\t\t\t\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_INITIALLY_COMPILABLE};", "\t\t\t}else", "\t\t\t{", "", "\t\t\t\tupdCols = new int[] {SYSSTATEMENTSRowFactory.SYSSTATEMENTS_VALID,", "\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_TEXT,", "\t\t\t\t\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_LASTCOMPILED,", "\t\t\t\t\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_USINGTEXT,", "\t\t\t\t\t\t\t\t\t\t SYSSTATEMENTSRowFactory.SYSSTATEMENTS_CONSTANTSTATE };", "\t\t\t}" ] }, { "added": [ " // If this is an invalidation request, we don't need to update the", " // parameter descriptors, so we are done.", " if (!recompile)" ], "header": "@@ -4480,11 +4465,9 @@ public final class\tDataDictionaryImpl", "removed": [ "\t\t/*", "\t\t** If we don't need to update the parameter", "\t\t** descriptors, we are done.", "\t\t*/", "\t\tif (!updateParamDescriptors)" ] } ] } ]
derby-DERBY-4166-a216dfd4
DERBY-4166 improvements to the mailjdbc test Contributed by Lily Wei lilywei@yahoo.com - Changed schema to allow multiple attachments. - Increase sleep time between threads to avoid deadlocks. - Restructure refresh thread to more accurately reflect mail retrieval. - maintain 10 - 56 rows during test to keep size predictable. - Make sure all transactions are commited or rolled back to avoid locking issues. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@921070 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/system/mailjdbc/tasks/Browse.java", "hunks": [ { "added": [ "\t\t\t\t//Try to avoid deadlock situation with delete from Refresh thread", "\t\t\t\tThread.sleep(100000);", "\t\t\t\t//Checking whether Refresh thread is running after doing Browse work", "\t\t\t\t//If Refresh is not running, interrupt the thread" ], "header": "@@ -48,10 +48,10 @@ public class Browse extends Thread {", "removed": [ "\t\t\t\tThread.sleep(30000);", "\t\t\t\t//Checking whther Refresh thread is running after doing the", "\t\t\t\t// work", "\t\t\t\t// If not interrupt the thread to do a refresh" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/system/mailjdbc/tasks/Refresh.java", "hunks": [ { "added": [ "import java.sql.SQLException;" ], "header": "@@ -24,6 +24,7 @@ package org.apache.derbyTesting.system.mailjdbc.tasks;", "removed": [] }, { "added": [ "\t\t\t\t\tThread.sleep(150000);", "\t\t\t\t\tconn.commit();", "\t\t\t\t\tMailJdbc.logAct.logMsg(\"#### \" + getName()", "\t\t\t\t\t\t\t+ \"...commit connection...\");" ], "header": "@@ -50,10 +51,13 @@ public class Refresh extends Thread {", "removed": [ "\t\t\t\t\tThread.sleep(60000);" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/system/mailjdbc/utils/DbTasks.java", "hunks": [ { "added": [ "\t" ], "header": "@@ -51,7 +51,7 @@ public class DbTasks {", "removed": [ "" ] }, { "added": [ "\t\t\tMailJdbc.logAct", "\t\t\t.logMsg(\" \\n*****************************************************\");", "\t\t\tMailJdbc.logAct", "\t\t\t.logMsg(\"\\n\\n\\tStarting the test in NetworkServer mode\");", "\t\t\tMailJdbc.logAct", "\t\t\t.logMsg(\"\\n\\n*****************************************************\");" ], "header": "@@ -81,6 +81,12 @@ public class DbTasks {", "removed": [] }, { "added": [ "\t\t\t" ], "header": "@@ -89,12 +95,7 @@ public class DbTasks {", "removed": [ "\t\t\tMailJdbc.logAct", "\t\t\t\t\t.logMsg(\" \\n*****************************************************\");", 
"\t\t\tMailJdbc.logAct", "\t\t\t\t\t.logMsg(\"\\n\\n\\tStarting the test in NetworkServer mode\");", "\t\t\tMailJdbc.logAct", "\t\t\t\t\t.logMsg(\"\\n\\n*****************************************************\");" ] }, { "added": [ "\tpublic static Connection getConnection(String usr, String passwd){" ], "header": "@@ -127,7 +128,7 @@ public class DbTasks {", "removed": [ "\tpublic static Connection getConnection(String usr, String passwd){" ] }, { "added": [ "\t\tStatement attach_stmt = null;", "\t\tint inbox_count = 0;", "\t\tint attach_count = 0;", "\t\t\tattach_stmt = conn.createStatement();", "\t\t\tResultSet rs1 = attach_stmt.executeQuery(Statements.getRowCountAttach);", "\t\t\t\tinbox_count = rs.getInt(1);", "\t\t\t\tattach_count = rs1.getInt(1);", "\t\t\tif (inbox_count == 0)", "\t\t\t\t\t\t\t+ inbox_count);", "\t\t\t\t\t+ \"The number of mails in the REFRESH.ATTACH are : \"", "\t\t\t\t\t+ attach_count);", "\t\t\tattach_stmt.close();" ], "header": "@@ -152,41 +153,41 @@ public class DbTasks {", "removed": [ "\t\tStatement stmt1 = null;", "\t\tint count = 0;", "\t\tint count1 = 0;", "\t\t\tstmt1 = conn.createStatement();", "\t\t\tResultSet rs1 = stmt1.executeQuery(Statements.getRowCountAtach);", "\t\t\t\tcount = rs.getInt(1);", "\t\t\t\tcount1 = rs1.getInt(1);", "\t\t\tif (count == 0)", "\t\t\t\t\t\t\t+ count);", "\t\t\t\t\t+ \"The number of mails in the attachment table are : \"", "\t\t\t\t\t+ count1);", "\t\t\tstmt1.close();" ] }, { "added": [ "\t\t\tint inbox_id = 0;", "\t\t\tif((inbox_count - 1) <= 0)", "\t\t\t\tinbox_id = 1;", "\t\t\telse {", "\t\t\t\t inbox_id = Rn.nextInt(inbox_count - 1);", "\t\t\t\t if (inbox_id == 0)", "\t\t\t\t\t inbox_id = 1;", "\t\t\t}", "\t\t\tlong start_t = System.currentTimeMillis();", "", "\t\t\tResultSet inbox_rs = stmt", "\t\t\t\t\t.executeQuery(\"select message from REFRESH.INBOX where id = \"", "\t\t\t\t\t\t\t+ inbox_id);", "\t\t\t", "\t\t\tif (inbox_rs.next()) {", "\t\t\t\tsize = inbox_rs.getClob(1).length();", 
"\t\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t\t+ \"size of the message for id \" + inbox_id + \" is : \"", "\t\t\t\t\t\t+ size);", "\t\t\t} else", "\t\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t\t+ \"mail with the id \" + inbox_id + \" does not exist\");", "\t\t\tlong end_t = System.currentTimeMillis();", "\t\t\tlog.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"Time taken to get the clob :\"", "\t\t\t\t\t+ PerfTime.readableTime(end_t - start_t));", "\t\t\tinbox_rs.close();", "\t\t\tResultSet attach_rs = stmt", "\t\t\t\t\t\t\t+ inbox_id);", "\t\t\tif (attach_rs.next()) {", "\t\t\t\tsize = attach_rs.getBlob(1).length();", "\t\t\t\t\t\t+ \"size of the attachment for id \" + inbox_id", "\t\t\tattach_rs.close();", " \t\t\tstmt.close();" ], "header": "@@ -196,47 +197,51 @@ public class DbTasks {", "removed": [ "\t\t\tint attach_id = 0;", "\t\t\tif((count - 1) <= 0)", "\t\t\t\tattach_id = 0;", "\t\t\telse", "\t\t\t\t attach_id = Rn.nextInt(count - 1);", "\t\t\tResultSet rs = stmt", "\t\t\t\t\t\t\t+ attach_id);", "\t\t\tif (rs.next()) {", "\t\t\t\tsize = rs.getBlob(1).length();", "\t\t\t\t\t\t+ \"size of the attachment for id \" + attach_id", "\t\t\trs.close();", "", "\t\t\trs = stmt", "\t\t\t\t\t.executeQuery(\"select message from REFRESH.INBOX where id = \"", "\t\t\t\t\t\t\t+ attach_id);", "\t\t\tlong start_t = System.currentTimeMillis();", "\t\t\tif (rs.next()) {", "\t\t\t\tsize = rs.getClob(1).length();", "\t\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t\t+ \"size of the message for id \" + attach_id + \" is : \"", "\t\t\t\t\t\t+ size);", "\t\t\t} else", "\t\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t\t+ \"mail with the id \" + attach_id + \" does not exist\");", "\t\t\tlong end_t = System.currentTimeMillis();", "\t\t\tlog.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"Time taken to get the clob :\"", 
"\t\t\t\t\t+ PerfTime.readableTime(end_t - start_t));", "\t\t\trs.close();", "\t\t\tstmt.close();" ] }, { "added": [ "\t\t// Delete done by the user. The user will mark the mails to be deleted", "\t", "\t\t\t\t\t.executeQuery(\"select max(id) from REFRESH.INBOX \");", "\t\t\trs.close();", "\t\t\tstmt.close();", "\t\t\t{", "\t\t\t id = Rn.nextInt(id_count - 1);", "\t\t\t if (id == 0)", "\t\t\t \tid = 1;", "\t\t\t}", "\t\t\t\tupdateUser.setInt(2, i);", "\t\t\t\t\t+ \" Time taken to mark \" + id + \" mails to be deleted :\"", "\t\t\t\t\t+ \"The number of mails marked to be deleted from REFRESH.INBOX is: \"", "\t\t\t\t\t+ \"Exception while deleting mail REFRESH.INBOX\"" ], "header": "@@ -259,52 +264,51 @@ public class DbTasks {", "removed": [ "\t\t// Delete done by the user. Thre user will mark the mails to be deleted", "\t\t// and then", "\t\tint for_id = 0;", "\t\t\t\t\t.executeQuery(\"select max(id)from REFRESH.INBOX \");", "\t\t\t\tid = Rn.nextInt(id_count - 1);", "\t\t\t\tif((id_count -1) <= 0 )", "\t\t\t\t\tfor_id = id_count;", "\t\t\t\telse", "\t\t\t\t\tfor_id = Rn.nextInt(id_count - 1);", "\t\t\t\tupdateUser.setInt(2, for_id);", "\t\t\t\t\t+ \" Time taken to mark the mails to be deleted :\"", "\t\t\t\t\t+ \"The number of mails marked to be deleted by user:\"", "\t\t\trs.close();", "\t\t\tstmt.close();", "\t\t\t\t\t+ \"Exception while deleting mail by user: \"" ] }, { "added": [], "header": "@@ -325,8 +329,6 @@ public class DbTasks {", "removed": [ "\t\t\tResultSet rs = stmt", "\t\t\t\t\t.executeQuery(\"select count(*) from REFRESH.INBOX where to_delete=1\");" ] }, { "added": [ "\t\t\t\t\t+ \" rows deleted from REFRESH.INBOX\");", "\t\t\t\t\t+ \"Exception while deleting mail from REFRESH.INBOX, REFRESH.ATTACH by Thread: \"" ], "header": "@@ -335,15 +337,14 @@ public class DbTasks {", "removed": [ "\t\t\t\t\t+ \" rows deleted\");", "\t\t\trs.close();", "\t\t\t\t\t+ \"Exception while deleting mail by Thread: \"" ] }, { "added": [ " //If there is zero row, nothing 
to do\t", "\t\t\t\tif (count==0)", "\t\t\t\t\treturn;", "\t\t\t\t//If there is just one row, id is 1 (start from 1)", "\t\t\t\tif (count <= 1) ", "\t\t\t\t\tmessage_id = 1;", "\t\t\t\tif (message_id == 0)", "\t\t\t\t\tmessage_id = 1;", "\t\t\t\t//Assign the folder to be between 1 to 5", "\t\t\t\tint folder_id = Rn.nextInt(5 - 1);", "\t\t\t\tif (folder_id == 0)", "\t\t\t\t\tfolder_id = 1;" ], "header": "@@ -371,11 +372,20 @@ public class DbTasks {", "removed": [ "\t\t\t\tint folder_id = Rn.nextInt(5 - 1);", "\t\t\t\tif (count == 0)", "\t\t\t\t\tmessage_id = 0;" ] }, { "added": [ "\t\t\t\t\t\t+ \" is moved to folder\" + folder_id);" ], "header": "@@ -386,7 +396,7 @@ public class DbTasks {", "removed": [ "\t\t\t\t\t\t+ \" is moved to folder with id : \" + folder_id);" ] }, { "added": [ "\t\t// Inserting rows to the inbox table. ", "\t\t// inbox table would have random attachments - (attach table)", "\t\t// The random attachment depends on no of rows id in inbox" ], "header": "@@ -408,11 +418,9 @@ public class DbTasks {", "removed": [ "\t\t// Inserting rows to the inbox table. 
Making attach_id of randomly", "\t\t// selected rows to be one", "\t\t// and for those rows inserting blobs in the attach table", "\t\tStatement stmt = conn.createStatement();", "\t\tint num = Rn.nextInt(10 - 1);" ] }, { "added": [ "\t\t\tlong total_ins_inb = 0;", "\t\t\tlong total_ins_att = 0;", "\t\t\tint row_count = 0;", "\t\t\tint num = Rn.nextInt(10 - 1);", "\t\t\tif (num == 0) ", "\t\t\t\tnum = 1;", "\t\t\t\tlong s_insert = System.currentTimeMillis();" ], "header": "@@ -423,8 +431,14 @@ public class DbTasks {", "removed": [ "\t\t\tlong s_insert = System.currentTimeMillis();" ] }, { "added": [ "\t\t\t\t\t\t\t\t5,", "\t\t\t\tstreamReader.close();", "\t\t\t\tlong e_insert = System.currentTimeMillis();", "\t\t\t\ttotal_ins_inb = total_ins_inb + (e_insert - s_insert);", "\t\t\t\tPreparedStatement insertAttach = conn", "\t\t\t\tStatement stmt1 = conn.createStatement();", "\t\t\t\tResultSet rs = insertFirst.getGeneratedKeys();", "\t\t\t\t//10/1 chance to have attactment", "\t\t\t\tint numa = Rn.nextInt(10 - 1);", "\t\t\t\tif (numa == 0)", "\t\t\t\t\tnuma = 1;", "\t\t\t\tif (i == numa) {", "\t\t\t\t\tint attachid = 0;", "\t\t\t\twhile (rs.next()) {", "\t\t\t\t\tattachid = rs.getInt(1);\t", "\t\t\t\t// insert from 1 to 5 attachments", "\t\t\t\tint num_attach = Rn.nextInt(5 - 1);", "\t\t\t\tif (num_attach == 0)", "\t\t\t\t\tnum_attach = 1;", "\t\t\t\tfor (int j = 0; j < num_attach; j++) { ", "\t\t\t\t\tlong a_start = System.currentTimeMillis();", "\t\t\t\t\tinsertAttach.setInt(1, attachid);", "\t\t\t\t\t//attach_id should be automatically generated", "\t\t\t\t\ttry {", "\t\t\t\t\t\t// to create a stream of random length between 0 and 5M", "\t\t\t\t\t\tint blobLength = Rn.nextInt(5130000 - 0 + 1) + 0;", "\t\t\t\t\t\tstreamIn = new LoopingAlphabetStream(blobLength);", "\t\t\t\t\t\tinsertAttach.setBinaryStream(2, streamIn, blobLength);", "\t\t\t\t\t} catch (Exception e) {", "\t\t\t\t\t\tMailJdbc.logAct.logMsg(LogFile.ERROR + thread_name + \" : \"", "\t\t\t\t\t\t\t+ 
\"Exception : \" + e.getMessage());", "\t\t\t\t\t\terrorPrint(e);", "\t\t\t\t\t\tthrow e;", "\t\t\t\t\t}", "\t\t\t\t\tint result_attach = insertAttach.executeUpdate();", "\t\t\t\t\tstreamIn.close();", "\t\t\t\t\tif (result_attach != 0) {", "\t\t\t\t\t\tblob_count = blob_count + 1;", "\t\t\t\t\t\trow_count++;", "\t\t\t\t\t}", "\t\t\t\t\tlong a_end = System.currentTimeMillis();", "\t\t\t\t\ttotal_ins_att = total_ins_att + (a_end - a_start);", "\t\t\t\t }", "\t\t\t} ", "\t\t\tlog.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"Time taken to insert \" + num + \" rows to REFRESH.INBOX :\"", "\t\t\t\t\t+ PerfTime.readableTime(total_ins_inb));", "\t\t\tlog.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"Time taken to insert \" + row_count + \" rows to REFRESH.ATTACH :\"\t\t\t", "\t\t\t\t\t+ PerfTime.readableTime(total_ins_att));", "\t\t\tinsertFirst.close();", "\t\t}", "\t\tcatch (SQLException sqe) {", "\t\t\tMailJdbc.logAct.logMsg(LogFile.ERROR + thread_name + \" : \"", "\t\t\t\t\t+ \"Error while inserting REFRESH.ATTACH:\" + sqe.getMessage());" ], "header": "@@ -445,93 +459,78 @@ public class DbTasks {", "removed": [ "\t\t\t\tint rand_num = Rn.nextInt(10 - 1);", "\t\t\t\tif (i == rand_num) {", "\t\t\t\t\tResultSet rs = stmt", "\t\t\t\t\t\t\t.executeQuery(\"select count(*) from REFRESH.INBOX where attach_id>0\");", "\t\t\t\t\twhile (rs.next()) {", "\t\t\t\t\t\tid_count = rs.getInt(1);", "\t\t\t\t\t\tinsertFirst.setInt(5, rs.getInt(1) + 1);", "\t\t\t\t\t}", "", "\t\t\t\t\trs.close();", "\t\t\t\t\tconn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);", "\t\t\t\t} else", "\t\t\t\t\tinsertFirst.setInt(5, 0);", "\t\t\t\t\t\t\t\t6,", "\t\t\t}", "\t\t\tinsertFirst.close();", "\t\t\tconn.commit();", "\t\t\tstreamReader.close();", "", "\t\t\tlong e_insert = System.currentTimeMillis();", "\t\t\tlog.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"Time taken to insert \" + num + \"rows :\"", "\t\t\t\t\t+ 
PerfTime.readableTime(e_insert - s_insert));", "\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"number of mails inserted : \" + num);", "\t\t} catch (SQLException sqe) {", "\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"Error while inserting into REFRESH.INBOX:\"", "\t\t\t\t\t+ sqe.getMessage());", "\t\t\tsqe.printStackTrace();", "\t\t\terrorPrint(sqe);", "\t\t\tconn.rollback();", "\t\t\tthrow sqe;", "\t\t}", "\t\ttry {", "\t\t\tPreparedStatement insertAttach = conn", "\t\t\tStatement stmt1 = conn.createStatement();", "\t\t\tResultSet rs = stmt1", "\t\t\t\t\t.executeQuery(\"select id,attach_id from REFRESH.INBOX where attach_id >\"", "\t\t\t\t\t\t\t+ id_count);", "\t\t\tint row_count = 0;", "\t\t\tlong a_start = System.currentTimeMillis();", "\t\t\twhile (rs.next()) {", "\t\t\t\tinsertAttach.setInt(1, rs.getInt(1));", "\t\t\t\tinsertAttach.setInt(2, rs.getInt(2));", "\t\t\t\ttry {", "\t\t\t\t\t// to create a stream of random length between 0 and 5M", "\t\t\t\t\tint blobLength = Rn.nextInt(5130000 - 0 + 1) + 0;", "\t\t\t\t\tstreamIn = new LoopingAlphabetStream(blobLength);", "\t\t\t\t\tinsertAttach.setBinaryStream(3, streamIn, blobLength);", "\t\t\t\t} catch (Exception e) {", "\t\t\t\t\tMailJdbc.logAct.logMsg(LogFile.ERROR + thread_name + \" : \"", "\t\t\t\t\t\t\t+ \"Exception : \" + e.getMessage());", "\t\t\t\t\terrorPrint(e);", "\t\t\t\t\tthrow e;", "\t\t\t\tint result_attach = insertAttach.executeUpdate();", "\t\t\t\tstreamIn.close();", "\t\t\t\tif (result_attach != 0) {", "\t\t\t\t\tblob_count = blob_count + 1;", "\t\t\t\t\trow_count++;", "", "\t\t\t}", "\t\t\tlong a_end = System.currentTimeMillis();", "\t\t\tlog.logMsg(LogFile.INFO + thread_name + \" : \"", "\t\t\t\t\t+ \"Time taken to insert \" + row_count + \"attachments :\"", "\t\t\t\t\t+ PerfTime.readableTime(a_end - a_start));", "\t\t\tstmt.close();", "\t\t} catch (SQLException sqe) {", "\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + 
thread_name + \" : \"", "\t\t\t\t\t+ \"Error while inserting attachments:\" + sqe.getMessage());" ] }, { "added": [ "\t\t\t\t+ \"delete mails which are older than 1 day with sleep 250000 \");", "\t\t\tselExp.close();", "\t\t\tconn.commit();", "\t\t\t\t\t+ PerfTime.readableTime(s_delExp) + \" : \"", "\t\t\t\t\t+ \"Time taken to delete \" + count + \" mails :\"" ], "header": "@@ -550,22 +549,20 @@ public class DbTasks {", "removed": [ "\t\t\tPreparedStatement deleteExp = conn", "\t\t\t\t\t.prepareStatement(Statements.delExp);", "\t\t\t\t\t+ \"delete mails which are older than 1 day\");", "\t\t\t\t\t+ \"Time taken to delete \" + count + \"mails :\"", "\t\t\tdeleteExp.close();", "\t\t\tselExp.close();", "\t\t\tconn.commit();" ] }, { "added": [ "\t\t\t\t+ \" \" + PerfTime.readableTime(e_compress - s_compress));" ], "header": "@@ -651,7 +648,7 @@ public class DbTasks {", "removed": [ "\t\t\t\t+ PerfTime.readableTime(e_compress - s_compress));" ] }, { "added": [ "\t\t\t//Generate the random number between (count - 36)==>24 to 1", "\t\t\tif (count > 50) {", "\t\t\t\tdiff = Rn.nextInt((count - 36) - 1);", "\t\t\t\tif (diff == 0) ", "\t\t\t\t\tdiff = 1;" ], "header": "@@ -670,13 +667,15 @@ public class DbTasks {", "removed": [ "\t\t\tStatement stmt3 = conn.createStatement();", "\t\t\tif (count > 12) {", "\t\t\t\tdiff = count - 12;" ] }, { "added": [ "\t\t\t\t\t\t\t+ stmt2" ], "header": "@@ -685,7 +684,7 @@ public class DbTasks {", "removed": [ "\t\t\t\t\t\t\t+ stmt3" ] }, { "added": [], "header": "@@ -697,7 +696,6 @@ public class DbTasks {", "removed": [ "\t\t\tstmt3.close();" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/system/mailjdbc/utils/Statements.java", "hunks": [ { "added": [ "\tpublic static String insertStr = \"insert into REFRESH.INBOX(from_name,to_name,date,Message,size_problem) values (?,?,?,?,?)\";", "\t", "\tpublic static String insertStrAttach = \"insert into REFRESH.ATTACH (id,attachment) values (?,?)\";", "\tpublic static String deleteStr = 
\"delete from REFRESH.INBOX where to_delete = 1\";", "\t", "\t", "\tpublic static String getRowCount = \"select count(*) from REFRESH.INBOX\";", "\tpublic static String getRowCountAttach = \"select count(*) from REFRESH.ATTACH\";" ], "header": "@@ -23,15 +23,17 @@ package org.apache.derbyTesting.system.mailjdbc.utils;", "removed": [ "\tpublic static String insertStr = \"insert into REFRESH.INBOX(from_name,to_name,date,Message,attach_id,size_problem) values (?,?,?,?,?,?)\";", "", "\tpublic static String deleteStr = \"delete from REFRESH.INBOX where to_delete = 1\";", "\tpublic static String getRowCount = \"select count(*)from REFRESH.INBOX\";", "", "\tpublic static String getRowCountAtach = \"select count(*) from REFRESH.ATTACH\";" ] }, { "added": [], "header": "@@ -47,12 +49,8 @@ public class Statements {", "removed": [ "\tpublic static String insertStrAttach = \"insert into REFRESH.ATTACH (id,attach_id,attachment) values (?,?,?)\";", "", "\tpublic static String getRowCountAttach = \"select count(*) from REFRESH.ATTACH\";", "" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/system/mailjdbc/utils/ThreadUtils.java", "hunks": [ { "added": [ "\tpublic static ThreadUtils threadutil = new ThreadUtils();", "\t\tthreadutil.run();" ], "header": "@@ -34,11 +34,11 @@ import org.apache.derbyTesting.system.mailjdbc.tasks.Refresh;", "removed": [ "\tpublic static ThreadUtils util = new ThreadUtils();", "\t\tutil.run();" ] }, { "added": [ "\t\t\tint sleep_time = (int) 150000; //Due the cascade constriant", "\t\t\t\t\t\t\t\t\t // This is the number that", "\t\t\t\t\t\t\t\t\t\t // make sure insert attachment has been finished", "\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + \"Started: \" + t.getName() + \" with 150000 sleep time\");" ], "header": "@@ -56,12 +56,12 @@ public class ThreadUtils {", "removed": [ "\t\t\tint sleep_time = (int) (Math.random() * 500);//TODO : Change this", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t // number to a", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t // meaningful 
one)", "\t\t\tMailJdbc.logAct.logMsg(LogFile.INFO + \"Started: \" + t.getName());" ] }, { "added": [ "\t\t} if (name.equalsIgnoreCase(\"Purging Thread\")) {", "\t\t\treturn (Purge) userThreads.get(2);", "\t\treturn threadutil;" ], "header": "@@ -90,12 +90,14 @@ public class ThreadUtils {", "removed": [ "\t\treturn util;" ] } ] } ]
derby-DERBY-4166-d9e100c4
DERBY-4166 (partial) improvements to the mailjdbc test Make databaseSize a recursive method to calculate the disk usage properly git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@795310 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/system/mailjdbc/utils/DbTasks.java", "hunks": [ { "added": [ "\t\tlong dbsize = databaseSize(new File(\"mailsdb\"));" ], "header": "@@ -621,7 +621,7 @@ public class DbTasks {", "removed": [ "\t\tlong dbsize = databaseSize(\"mailsdb/seg0\");" ] }, { "added": [ "\t\tdbsize = databaseSize(new File(\"mailsdb\"));" ], "header": "@@ -652,7 +652,7 @@ public class DbTasks {", "removed": [ "\t\tdbsize = databaseSize(\"mailsdb/seg0\");" ] }, { "added": [ "\tpublic static long databaseSize(File dbname) {", "\t long length = 0;", "\t if (dbname.isDirectory()) {", "\t String[] children = dbname.list();", "\t for (int i=0; i<children.length; i++) ", "\t length = length + databaseSize(new File(dbname, children[i]));", "\t return length;", "\t }", "\t else", "\t return dbname.length();\t" ], "header": "@@ -746,16 +746,16 @@ public class DbTasks {", "removed": [ "\tpublic static long databaseSize(String dbname) {", "\t\tFile dir = new File(dbname);", "\t\tFile[] files = dir.listFiles();", "\t\tlong length = 0;", "\t\tint count = 0;", "\t\tfor (int i = 0; i < files.length; i++) {", "\t\t\tlength = length + files[i].length();", "\t\t\tcount++;", "\t\t}", "\t\treturn length;" ] } ] } ]
derby-DERBY-417-fcfc9254
DERBY-417: Remove hardcoded references to encryption providers in Beetle6038 and let Derby choose the correct encryption provider. This allows the test to pass on Mac OS X and any other JVM where the vendor is not Sun or IBM. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@380287 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4171-5bef31e2
DERBY-4171: Connections to on-disk db go to in-memory db if in-memory db with same name is booted. If the storage factory isn't the default one (DIRECTORY), don't allow connections without a subsubprotocol specified. Added a test. Patch file: derby-4171-1b-fix.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@782954 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4175-84fa650c
DERBY-4175 Instability in some replication tests under load, since tests don't wait long enough for final state or anticipate intermediate states Patch derby-4175-3 (+ resolved some conflicts arising from commit of DERBY-3417). It makes three replication tests less sensitive to load by making them accept intermediate states without failing or wait for longer before giving up on seeing the final end state of a replication state change. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@769602 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4177-8654374d
DERBY-4177: Javadoc for BTreeLockingPolicy should not mention "scan lock" The fix for DERBY-2991 removed the concept of a "scan lock" and RECORD_ID_PROTECTION_HANDLE, so the javadoc for the BTreeLockingPolicy class hierarchy should not mention them anymore. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@775937 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/btree/BTreeLockingPolicy.java", "hunks": [ { "added": [], "header": "@@ -24,7 +24,6 @@ package org.apache.derby.impl.store.access.btree;", "removed": [ "import org.apache.derby.iapi.store.raw.RecordHandle;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java", "hunks": [ { "added": [ " // If we need to release the latches while searching left,", " // a new key may have appeared in the range that we've already", " // searched, or the tree may have been rearranged, so the" ], "header": "@@ -818,14 +818,9 @@ class B2IRowLocking3 implements BTreeLockingPolicy", "removed": [ " // RESOLVE RLL (mikem) - do I need to do the ", " // RECORD_ID_PROTECTION_HANDLE lock.", " // First guarantee that record id's will not move off this", " // current page while searching for previous key, by getting", " // the RECORD_ID_PROTECTION_HANDLE lock on the current page.", " // Since we have a latch on the cur", "", " // RESOLVE RLL (mikem) - NO RECORD_ID PROTECTION IN EFFECT." ] } ] } ]
derby-DERBY-4179-0c7cea74
DERBY-4179 bootLock.java fails with missing exception on z/OS with pmz3160sr2ifix-20081021_01(SR2+IZ32776+IZ33456), and Windows Vista Followup comment fix. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@942480 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4179-6c65c08d
DERBY-4179 bootLock.java fails with missing exception on z/OS with pmz3160sr2ifix-20081021_01(SR2+IZ32776+IZ33456), and Windows Vista Follow-up simplification patch, which replaces the home-grown system property setup/teardown code with the standard SystemPropertyTestSetup. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@942587 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4179-6e930481
DERBY-4179 bootLock.java fails with missing exception on z/OS with pmz3160sr2ifix-20081021_01(SR2+IZ32776+IZ33456), and Windows Vista Followup patch, doc and comment changes only. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@942476 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4179-8badf993
DERBY-4179 bootLock.java fails with missing exception on z/OS with pmz3160sr2ifix-20081021_01(SR2+IZ32776+IZ33456), and Windows Vista Patch derby-4179-junit-5: * Rewrite of boot lock test to JUnit, this also solves the problem with running with jars in relative classpaths. * Added test of effectiveness of derby.database.forceDatabaseLock for phoneME platforms, until we implement DERBY-4646. (I could not find any other such test). * Fixed JUnit harness to correctly fork a Sun phoneME vm (image name is cvm, not java), * Added logic to capture what happens to minion (if it fails) by capturing its stderr (if it ever gets started, that is), cf. example enclosed. * Added socket logic to communicate to parent when minion has booted the "dual boot" candidate to avoid having to wait for 30 seconds or more to be sure it has done so. Test now runs in a few seconds. * Skips test for j9 for now, since this platform fails on the fork operation according to Kathey, cf DERBY-4647. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@942286 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [ "", " /**", " * Decorate a test to use a new database that is created upon the first", " * connection request to the database and shutdown & deleted at", " * tearDown. The configuration differs only from the current configuration", " * by the list of used databases. The generated database name is added at", " * the end of <code>usedDbNames</code> and assigned as a default database", " * name. This decorator expects the database file to be local so it can be", " * removed.", " * @param test Test to be decorated", " * @param dbName We sometimes need to know outside to be able topass it on", " * to other VMs/processes.", " * @return decorated test.", " */", " public static TestSetup singleUseDatabaseDecorator(Test test, String dbName)", " {", " return new DatabaseChangeSetup(", " new DropDatabaseSetup(test, dbName), dbName, dbName, true);", " }", "" ], "header": "@@ -664,6 +664,26 @@ public class TestConfiguration {", "removed": [] } ] } ]
derby-DERBY-418-68c8e7d8
DERBY-418: outofmemory error when running large query in autocommit=false mode DERBY-1142: Metadata calls leak memory Notify GenericLanguageConnectionContext when activations are marked as unused, and clean up unused activations when a new one is added. Patch contributed by Mayuresh Nirhali. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@439279 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ "\tprivate volatile boolean unusedActs=false;" ], "header": "@@ -116,6 +116,7 @@ public class GenericLanguageConnectionContext", "removed": [] }, { "added": [ "\tpublic void addActivation(Activation a) ", "\t\tthrows StandardException {", "\t\t// DERBY-418. Activations which are marked unused,", "\t\t// are closed here. Activations Vector is iterated ", "\t\t// to identify and close unused activations, only if ", "\t\t// unusedActs flag is set to true and if the total ", "\t\t// size exceeds 20.", "\t\tif( (unusedActs) && (acts.size() > 20) ) {", "\t\t\tunusedActs = false;", "\t\t\tfor (int i = acts.size() - 1; i >= 0; i--) {", "", "\t\t\t\t// it maybe the case that a Activation's reset() ends up", "\t\t\t\t// closing one or more activation leaving our index beyond", "\t\t\t\t// the end of the array", "\t\t\t\tif (i >= acts.size())", "\t\t\t\t\tcontinue;", "", "\t\t\t\tActivation a1 = (Activation) acts.elementAt(i);", "\t\t\t\tif (!a1.isInUse()) {", "\t\t\t\t\ta1.close();", "\t\t\t\t}", "\t\t\t}", "\t\t}", "" ], "header": "@@ -429,9 +430,32 @@ public class GenericLanguageConnectionContext", "removed": [ "\tpublic void addActivation(Activation a) {" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/BaseActivation.java", "hunks": [ { "added": [ "", "\t// This flag is declared volatile to ensure it is ", "\t// visible when it has been modified by the finalizer thread.", "\tprivate volatile boolean inUse;" ], "header": "@@ -125,7 +125,10 @@ public abstract class BaseActivation implements CursorActivation, GeneratedByteC", "removed": [ "\tprivate boolean inUse;" ] }, { "added": [ "\tpublic final void initFromContext(Context context) ", "\t\tthrows StandardException {" ], "header": "@@ -192,7 +195,8 @@ public abstract class BaseActivation implements CursorActivation, GeneratedByteC", "removed": [ "\tpublic final void 
initFromContext(Context context) {" ] }, { "added": [ "\t\tif(isInUse()) {", "\t\t\tinUse = false;", "\t\t\tlcc.notifyUnusedActivation();", "\t\t}" ], "header": "@@ -786,7 +790,10 @@ public abstract class BaseActivation implements CursorActivation, GeneratedByteC", "removed": [ "\t\tinUse = false;" ] } ] } ]
derby-DERBY-4182-8b553f1c
DERBY-4182 Before this fix abort of inserts that included clob or blob chains would destroy the links of the allocated pages of the chains. This would leave allocated pages that could never be reclaimed either by subsequent post commit processing or inplace compress. Only offline compress could reclaim the space. This fix changes insert abort processing to automatically put all pieces of long columns except for the head page on the free list as part of the abort. Note this does not fix existing tables that have had this problem happen in the past, only stops it from happening. One must run an offline compress to reclaim this space to fix any instances of this bug prior to this fix. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@778926 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/data/BasePage.java", "hunks": [ { "added": [ " /**", " * Routine to insert a long column.", " * <p>", " * This code inserts a long column as a linked list of rows on overflow", " * pages. This list is pointed to by a small pointer in the main page", " * row column. The operation does the following:", " * allocate new overflow page", " * insert single row filling overflow page", " * while (more of column exists)", " * allocate new overflow page", " * insert single row with next piece of row", " * update previous piece to point to this new piece of row", " *", " * Same code is called both from an initial insert of a long column and", " * from a subsequent update that results in a long column.", " *", " * @return The recordHandle of the first piece of the long column chain.", " *", " * @param mainChainPage The parent page with row piece containing column", " * that will eventually point to this long column", " * chain.", " * @param lce The LongColumnException thrown when we recognized", " * that the column being inserted was \"long\", this ", " * structure is used to cache the info that we have", " * read so far about column. In the case of an insert", " * of the stream it will have a copy of just the first", " * page of the stream that has already been read once.", " * @param insertFlag flags for insert operation. ", " *", " *", " * @exception StandardException Standard exception policy.", " **/", "\tprotected RecordHandle insertLongColumn(", " BasePage mainChainPage,", " LongColumnException lce, ", " byte insertFlag)", "\t\trow[0] = lce.getColumn();", "\t\tRecordHandle handle = null;", "\t\tRecordHandle prevHandle = null;", "\t\tBasePage curPage = mainChainPage;", "\t\tBasePage prevPage = null;", "\t\tboolean isFirstPage = true;", " ", " // undo inserts as purges of all pieces of the overflow column", " // except for the 1st overflow page pointed at by the main row. 
", " //", " // Consider a row with one column which is a long column", " // that takes 2 pages for itself plus an entry in main parent page.", " // the log records in order for this look something like:", " // insert overflow page 1", " // insert overflow page 2", " // update overflow page 1 record to have pointer to overflow page 2", " // insert main row (which has pointer to overflow page 1)", " //", " // If this insert gets aborted then something like the following ", " // happens:", " // main row is marked deleted (but ptr to overflow 1 still exists)", " // update is aborted so link on page 2 to page 1 is lost", " // overflow row on page 2 is marked deleted", " // overflow row on page 1 is marked deleted", " //", " // There is no way to reclaim page 2 later as the abort of the update", " // has now lost the link from overflow page 1 to overflow 2, so ", " // the system has to do it as part of the abort of the insert. But, ", " // it can't for page 1 as the main page will attempt to follow", " // it's link in the deleted row during it's space reclamation and it ", " // can't tell the difference ", " // between a row that has been marked deleted as part of an aborted ", " // insert or as part of a committed delete. 
When it follows the link", " // it could find no page and that could be coded against, but it could ", " // be that the page is now used by some other overflow row which would ", " // lead to lots of different kinds of problems.", " //", " // So the code leaves the 1st overflow page to be cleaned up with the", " // main page row is purged, but goes ahead and immediately purges all", " // the segments that will be lost as part of the links being lost due", " // to aborted updates.", " byte after_first_page_insertFlag = ", " (byte) (insertFlag | Page.INSERT_UNDO_WITH_PURGE);", "", "\t\t// when inserting a long column startColumn is just used" ], "header": "@@ -815,31 +815,93 @@ abstract class BasePage implements Page, Observer, TypedFormat", "removed": [ "\t/**", "\t ", "\t\tWhen we update a column, it turned into a long column. Need to change", "\t\tthe update to effectively insert a new long column chain.", "", "\t\t@exception StandardException Unexpected exception from the implementation", "\t */", "\tprotected RecordHandle insertLongColumn(BasePage mainChainPage,", "\t\t\tLongColumnException lce, byte insertFlag)", "\t\t// Object[] row = new Object[1];", "\t\t// row[0] = (Object) lce.getColumn();", "\t\trow[0] = lce.getColumn();", "\t\tRecordHandle handle = null;", "\t\tRecordHandle prevHandle = null;", "\t\tBasePage curPage = mainChainPage;", "\t\tBasePage prevPage = null;", "\t\tboolean isFirstPage = true;", "", "\t\t// when inserting a long column startCOlumn is just used" ] }, { "added": [ "\t\t\tstartColumn = ", " owner.getActionSet().actionInsert(", " t, curPage, slot, recordId, row, (FormatableBitSet)null, ", " (LogicalUndo) null, ", " (isFirstPage ? 
insertFlag : after_first_page_insertFlag), ", " startColumn, true, -1, (DynamicByteArrayOutputStream) null, ", " -1, 100);" ], "header": "@@ -873,9 +935,13 @@ abstract class BasePage implements Page, Observer, TypedFormat", "removed": [ "\t\t\tstartColumn = owner.getActionSet().actionInsert(t, curPage, slot, recordId,", "\t\t\t\trow, (FormatableBitSet)null, (LogicalUndo) null, insertFlag,", "\t\t\t\tstartColumn, true, -1, (DynamicByteArrayOutputStream) null, -1, 100);" ] }, { "added": [ "\t\t\t} else {", " }" ], "header": "@@ -885,8 +951,9 @@ abstract class BasePage implements Page, Observer, TypedFormat", "removed": [ "\t\t\t} else" ] } ] } ]
derby-DERBY-4186-72b50c0c
DERBY-4186 After master stop, test fails when it succeeds in connecting (rebooting) shut-down ex-slave Patch derby-4186-2, which solves this issue by a) fixing a bug in SlaveDataBase, which loses and exception iff a command to stop replication arrives from the master to the slave before the initial connection (successful slave started) command returns. This is a corner case race condition, and not very likely to occur in practice, since it makes little sense to stop replication immediately after starting it. b) fixing a bug in the test, which switched on its head what is really expected behavior, and also adds a waiting loop to allow intermediate state on slave after the master is stopped. Also fixed some erroneous comment and removed some cruft. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@769962 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/db/SlaveDatabase.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.error.ExceptionSeverity;" ], "header": "@@ -25,6 +25,7 @@ import org.apache.derby.iapi.error.PublicAPI;", "removed": [] }, { "added": [ "" ], "header": "@@ -134,6 +135,7 @@ public class SlaveDatabase extends BasicDatabase {", "removed": [] } ] } ]
derby-DERBY-4191-46df2550
DERBY-4191 Require minimum select privilege from the tables in the SELECT sql if no column is selected from the table by the user eg select count(*) from root.t; select 1 from root.t; For the query above, Derby was letting the user execute the select even if the user had no select privilege available on root.t With this fix, Derby will check if there is atleast one column on which the user has select privilege available to it or if the user select privilege at the table level. If yes, only then the user will be able to select from another user's table. select myTable.a from myTable, admin.privateTable for the query above, since no column is selected specifically from admin.privateTable, Derby will now see if there is table level select privilege or atleast one column level select privilege available on admin.privatTable One other problem scenario was update ruth.t_ruth set a = ( select max(c) from ruth.v_ruth ); For the query above, prior to fix for DERBY-4191, we were not looking for select privilege for the subquery. That has also been fixed with fix for DERBY-4191 All the existing tests passed with no regression. Added few tests for the fixes involved in this jira. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@898635 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/StatementColumnPermission.java", "hunks": [ { "added": [ "\t\t", "\t\t//DERBY-4191", "\t\t//If we are looking for select privilege on ANY column,", "\t\t//then we can quit as soon as we find some column with select", "\t\t//privilege. This is needed for queries like", "\t\t//select count(*) from t1", "\t\t//select count(1) from t1", "\t\t//select 1 from t1", "\t\t//select t1.c1 from t1, t2", "\t\tif (privType == Authorizer.MIN_SELECT_PRIV && permittedColumns != null)", "\t\t\treturn;" ], "header": "@@ -122,6 +122,17 @@ public class StatementColumnPermission extends StatementTablePermission", "removed": [] }, { "added": [ "\t\t\t\t\t//The user does not have needed privilege directly ", "\t\t\t\t\t//granted to it, so let's see if he has that privilege", "\t\t\t\t\t//available to him/her through his roles.", "\t\t\t\t\tpermittedColumns = tryRole(lcc, dd,\tforGrant, r);", "\t\t\t\t\t//DERBY-4191", "\t\t\t\t\t//If we are looking for select privilege on ANY column,", "\t\t\t\t\t//then we can quit as soon as we find some column with select", "\t\t\t\t\t//privilege through this role. This is needed for queries like", "\t\t\t\t\t//select count(*) from t1", "\t\t\t\t\t//select count(1) from t1", "\t\t\t\t\t//select 1 from t1", "\t\t\t\t\t//select t1.c1 from t1, t2", "\t\t\t\t\tif (privType == Authorizer.MIN_SELECT_PRIV && permittedColumns != null) {", "\t\t\t\t\t\tDependencyManager dm = dd.getDependencyManager();", "\t\t\t\t\t\tRoleGrantDescriptor rgd =", "\t\t\t\t\t\t\tdd.getRoleDefinitionDescriptor(role);", "\t\t\t\t\t\tContextManager cm = lcc.getContextManager();", "", "\t\t\t\t\t\tdm.addDependency(ps, rgd, cm);", "\t\t\t\t\t\tdm.addDependency(activation, rgd, cm);", "\t\t\t\t\t\treturn;", "\t\t\t\t\t}", "\t\t\t\t\t//Use the privileges obtained through the role to satisfy", "\t\t\t\t\t//the column level privileges we need. 
If all the remaining", "\t\t\t\t\t//column level privileges are satisfied through this role,", "\t\t\t\t\t//we will quit out of this while loop", "\t\t\t\t\tfor(int i = unresolvedColumns.anySetBit();", "\t\t\t\t\t\ti >= 0;", "\t\t\t\t\t\ti = unresolvedColumns.anySetBit(i)) {", "", "\t\t\t\t\t\tif(permittedColumns != null && permittedColumns.get(i)) {", "\t\t\t\t\t\t\tunresolvedColumns.clear(i);", "\t\t\t\t\t\t}", "\t\t\t\t\t}", "\t\tTableDescriptor td = getTableDescriptor(dd);", "\t\t//if we are still here, then that means that we didn't find any select", "\t\t//privilege on the table or any column in the table", "\t\tif (privType == Authorizer.MIN_SELECT_PRIV)", "\t\t\tthrow StandardException.newException( forGrant ? SQLState.AUTH_NO_TABLE_PERMISSION_FOR_GRANT", "\t\t\t\t\t : SQLState.AUTH_NO_TABLE_PERMISSION,", "\t\t\t\t\t authorizationId,", "\t\t\t\t\t getPrivName(),", "\t\t\t\t\t td.getSchemaName(),", "\t\t\t\t\t td.getName());" ], "header": "@@ -181,18 +192,59 @@ public class StatementColumnPermission extends StatementTablePermission", "removed": [ "\t\t\t\t\tunresolvedColumns = tryRole(lcc, dd, forGrant,", "\t\t\t\t\t\t\t\t\t\t\t\tr, unresolvedColumns);", "\t\t\tTableDescriptor td = getTableDescriptor(dd);" ] }, { "added": [ "\t * Try to use the supplied role r to see what column privileges are we ", "\t * entitled to. ", "\t * return the set of columns on which we have privileges through this role", "\t\t\t\t\t\t\t\t\t String r)" ], "header": "@@ -378,23 +430,20 @@ public class StatementColumnPermission extends StatementTablePermission", "removed": [ "\t * Given the set of yet unresolved column permissions, try to use", "\t * the supplied role r to resolve them. After this is done, return", "\t * the set of columns still unresolved. 
If the role is used for", "\t * anything, record a dependency.", "\t * @param unresolvedColumns the set of columns yet unaccounted for", "\t\t\t\t\t\t\t\t\t String r,", "\t\t\t\t\t\t\t\t\t FormatableBitSet unresolvedColumns)" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/StatementTablePermission.java", "hunks": [ { "added": [ "\t\tcase Authorizer.MIN_SELECT_PRIV:" ], "header": "@@ -247,6 +247,7 @@ public class StatementTablePermission extends StatementPermission", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CompilerContextImpl.java", "hunks": [ { "added": [ "" ], "header": "@@ -741,7 +741,7 @@ public class CompilerContextImpl extends ContextImpl", "removed": [ "\t" ] }, { "added": [ "", "\t\t//DERBY-4191", "\t\tif( currPrivType == Authorizer.MIN_SELECT_PRIV){", "\t\t\t//If we are here for MIN_SELECT_PRIV requirement, then first", "\t\t\t//check if there is already a SELECT privilege requirement on any ", "\t\t\t//of the columns in the table. If yes, then we do not need to add ", "\t\t\t//MIN_SELECT_PRIV requirement for the table because that ", "\t\t\t//requirement is already getting satisfied with the already", "\t\t\t//existing SELECT privilege requirement", "\t\t\tStatementTablePermission key = new StatementTablePermission( ", "\t\t\t\t\ttableUUID, Authorizer.SELECT_PRIV);", "\t\t\tStatementColumnPermission tableColumnPrivileges", "\t\t\t = (StatementColumnPermission) requiredColumnPrivileges.get( key);", "\t\t\tif( tableColumnPrivileges != null)", "\t\t\t\treturn;", "\t\t}", "\t\tif( currPrivType == Authorizer.SELECT_PRIV){", "\t\t\t//If we are here for SELECT_PRIV requirement, then first check", "\t\t\t//if there is already any MIN_SELECT_PRIV privilege required", "\t\t\t//on this table. If yes, then that requirement will be fulfilled", "\t\t\t//by the SELECT_PRIV requirement we are adding now. 
Because of", "\t\t\t//that, remove the MIN_SELECT_PRIV privilege requirement", "\t\t\tStatementTablePermission key = new StatementTablePermission( ", "\t\t\t\t\ttableUUID, Authorizer.MIN_SELECT_PRIV);", "\t\t\tStatementColumnPermission tableColumnPrivileges", "\t\t\t = (StatementColumnPermission) requiredColumnPrivileges.get( key);", "\t\t\tif( tableColumnPrivileges != null)", "\t\t\t\trequiredColumnPrivileges.remove(key);", "\t\t}", "\t\t" ], "header": "@@ -781,6 +781,36 @@ public class CompilerContextImpl extends ContextImpl", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CursorNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.conn.Authorizer;" ], "header": "@@ -31,6 +31,7 @@ import org.apache.derby.iapi.services.compiler.MethodBuilder;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SubqueryNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.conn.Authorizer;" ], "header": "@@ -28,6 +28,7 @@ import org.apache.derby.iapi.sql.compile.CostEstimate;", "removed": [] }, { "added": [ "\t\t/* DERBY-4191", "\t\t * We should make sure that we require select privileges", "\t\t * on the tables in the underlying subquery and not the", "\t\t * parent sql's privilege. eg", "\t\t * update t1 set c1=(select c2 from t2) ", "\t\t * For the query above, when working with the subquery, we should", "\t\t * require select privilege on t2.c2 rather than update privilege.", "\t\t * Prior to fix for DERBY-4191, we were collecting update privilege", "\t\t * requirement for t2.c2 rather than select privilege ", "\t\t */", "\t\tcc.pushCurrentPrivType(Authorizer.SELECT_PRIV);" ], "header": "@@ -497,6 +498,17 @@ public class SubqueryNode extends ValueNode", "removed": [] } ] } ]
derby-DERBY-4193-be4b84e8
DERBY-4193: ASSERT FAILED Scan position already saved with multi-threaded insert/update/delete Forget about the current position before trying to reposition on the end point of a scan. Then the ASSERT won't be confused if it needs to save the position again (it fails if it finds that a position is already saved). BTreeScan.positionAtStartForBackwardScan() also had this problem, but since it doesn't have any callers, this method was removed instead of being fixed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@772090 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/btree/BTreeScan.java", "hunks": [ { "added": [ " // Forget the current position since we'll use the start key", " // to reposition on the start of the scan.", " pos.init();" ], "header": "@@ -457,7 +457,9 @@ public abstract class BTreeScan extends OpenBTree implements ScanManager", "removed": [ " pos.current_leaf = null;" ] } ] } ]
derby-DERBY-4198-7d0f620f
DERBY-4198 When using the FOR UPDATE OF clause with SUR (Scroll-insensive updatable result sets), the updateRow() method crashes Patch derby-4198-4. This is the second part of the solution for this issue. It solves the issue of column mapping in the presence of named columns in a FOR UPDATE OF clause. The original code was not general enough; ScrollInsensitiveResultSet.updateRow needs to make use of RowChanger to do the right thing. The patch also adds new test cases. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@785163 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/execute/NoPutResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RowChanger;" ], "header": "@@ -24,6 +24,7 @@ package org.apache.derby.iapi.sql.execute;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/sql/execute/RowChanger.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.io.FormatableBitSet;" ], "header": "@@ -23,6 +23,7 @@ package org.apache.derby.iapi.sql.execute;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/CurrentOfResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RowChanger;" ], "header": "@@ -39,6 +39,7 @@ import org.apache.derby.iapi.types.RowLocation;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/NoPutResultSetImpl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RowChanger;" ], "header": "@@ -39,6 +39,7 @@ import org.apache.derby.iapi.sql.execute.RunTimeStatistics;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/NormalizeResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RowChanger;" ], "header": "@@ -30,6 +30,7 @@ import org.apache.derby.iapi.sql.ResultDescription;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/ProjectRestrictResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RowChanger;" ], "header": "@@ -46,6 +46,7 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/ScrollInsensitiveResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RowChanger;" ], "header": "@@ -37,6 +37,7 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] }, { "added": [ "", "\tpublic void updateRow(ExecRow row, RowChanger rowChanger)", "\t\t\tthrows StandardException {" ], 
"header": "@@ -1092,15 +1093,15 @@ public class ScrollInsensitiveResultSet extends NoPutResultSetImpl", "removed": [ "\tpublic void updateRow(ExecRow row) throws StandardException {", "\t\tExecRow newRow = row;", "\t\tboolean undoProjection = false;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/TemporaryRowHolderResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RowChanger;" ], "header": "@@ -33,6 +33,7 @@ import org.apache.derby.iapi.sql.Row;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/UpdateResultSet.java", "hunks": [ { "added": [ "\t\t\t\tsource.updateRow(newBaseRow, rowChanger);" ], "header": "@@ -557,7 +557,7 @@ class UpdateResultSet extends DMLWriteResultSet", "removed": [ "\t\t\t\tsource.updateRow(newBaseRow);" ] } ] } ]
derby-DERBY-4199-fe7db0f9
DERBY-4199: Write exceptions to file in the fail directory as they occur with JUnit tests. Write exceptions being thrown during JUnit testing to file as soon as they happen. The output file is called 'error-stacktrace.out' and is located under the fail-directory. If the output file already exists, the new output is appended. Added a new method for opening a file in append mode (in a privileged block). Patch file: derby-4199-1a-writeExceptionsToFile.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@774729 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4201-210e0349
DERBY-4201: SecureServerTest AssertionFailedError: Timed out waiting for network server to start Wait until server instances from earlier test cases have released the server port before starting a new server. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1210846 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/NetworkServerTestSetup.java", "hunks": [ { "added": [ "import java.net.ServerSocket;" ], "header": "@@ -24,8 +24,8 @@ import java.net.InetAddress;", "removed": [ "import java.io.InputStream;" ] }, { "added": [], "header": "@@ -33,7 +33,6 @@ import java.security.PrivilegedExceptionAction;", "removed": [ "import org.apache.derbyTesting.junit.BaseTestCase;" ] }, { "added": [ " // DERBY-4201: A network server instance used in an earlier test", " // case might not have completely shut down and released the server", " // port yet. Wait here until the port has been released.", " waitForAvailablePort();", "" ], "header": "@@ -184,6 +183,11 @@ final public class NetworkServerTestSetup extends BaseTestSetup {", "removed": [] }, { "added": [ " /**", " * Wait until the server port has been released by server instances used", " * by earlier test cases, or until the timeout specified by", " * {@link #getWaitTime()} has elapsed.", " *", " * @throws Exception if the port didn't become available before the timeout", " */", " private void waitForAvailablePort() throws Exception {", " TestConfiguration conf = TestConfiguration.getCurrent();", " InetAddress serverAddress = InetAddress.getByName(conf.getHostName());", " int port = conf.getPort();", " long giveUp = System.currentTimeMillis() + getWaitTime();", "", " while (true) {", " try {", " probeServerPort(port, serverAddress);", " break;", " } catch (IOException ioe) {", " if (System.currentTimeMillis() < giveUp) {", " Thread.sleep(SLEEP_TIME);", " } else {", " BaseTestCase.fail(", " \"Timed out waiting for server port to become available\",", " ioe);", " }", " }", " }", " }", "", " /**", " * Check if a server socket can be opened on the specified port.", " *", " * @param port the port to check", " * @param addr the address of the network interface", " * @throws IOException if a server socket couldn't be opened", " */", " private void probeServerPort(final int port, final 
InetAddress addr)", " throws IOException {", " try {", " AccessController.doPrivileged(new PrivilegedExceptionAction() {", " public Object run() throws IOException {", " new ServerSocket(port, 0, addr).close();", " return null;", " }", " });", " } catch (PrivilegedActionException pae) {", " throw (IOException) pae.getCause();", " }", " }", "" ], "header": "@@ -208,6 +212,56 @@ final public class NetworkServerTestSetup extends BaseTestSetup {", "removed": [] } ] } ]
derby-DERBY-4203-934f02da
DERBY-4203 (partial) Change mailjdbc system test to be able to restart with the existing database instead of creating a new one so it can be used for upgrade testing This patch does not include the sttest changes. Contrubuted by Lily Wei (lilywei at yahoo dot com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@795166 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/system/mailjdbc/utils/DbTasks.java", "hunks": [ { "added": [ "\t/**", "\t * jdbcLoad - Create url, schema and set driver and database system property that will be use later in the test.", "\t * @param driverType - \"embedded\" or \"NetworkServer\" ", "\t * @param useexistingdb - whether to use existing database or not", "\t */", "\tpublic static void jdbcLoad(String driverType, boolean useexistingdb) {" ], "header": "@@ -57,8 +57,12 @@ public class DbTasks {", "removed": [ "", "\tpublic static void jdbcLoad(String driverType) {" ] }, { "added": [ "\t\t\tif (useexistingdb)", "\t\t setSystemProperty(\"database\", \"jdbc:derby:mailsdb\");", "\t\t\telse", "\t\t setSystemProperty(\"database\", \"jdbc:derby:mailsdb;create=true\");\t\t\t", "\t\t\tif (useexistingdb)", "\t\t\t\tsetSystemProperty(\"database\",", "\t\t\t\t\t\"jdbc:derby://localhost:1527/mailsdb\");", "\t\t\telse", "\t\t\t\tsetSystemProperty(\"database\",", "\t\t\t\t\t\"jdbc:derby://localhost:1527/mailsdb;create=true\");", "\t\t\tsetSystemProperty(\"ij.user\", \"REFRESH\");", "\t\t\tsetSystemProperty(\"ij.password\", \"Refresh\");" ], "header": "@@ -69,13 +73,22 @@ public class DbTasks {", "removed": [ "\t\t\tsetSystemProperty(\"database\", \"jdbc:derby:mailsdb;create=true\");", "\t\t\tsetSystemProperty(\"database\",", "\t\t\t\t\t\"jdbc:derby://localhost:1527/mailsdb;create=true;user=REFRESH;password=Refresh\");" ] }, { "added": [ "\t\t\tif (useexistingdb)", "\t\t\t{", "\t\t\t\tMailJdbc.logAct", "\t\t\t\t.logMsg(\"Useing the existing database...\");", "\t\t\t\treturn;", "\t\t\t}" ], "header": "@@ -84,6 +97,12 @@ public class DbTasks {", "removed": [] } ] } ]
derby-DERBY-4204-698ab621
DERBY-4204: Runtime statistics not collected on re-execution of statement Make sure NoRowsResultSetImpl.close() prints the collected runtime statistics each time close() is called on a result set that is not currently closed. After DERBY-827 close() didn't print the statistics if the result set had been closed and reopened. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@774281 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/NoRowsResultSetImpl.java", "hunks": [ { "added": [], "header": "@@ -62,7 +62,6 @@ import org.apache.derby.iapi.types.DataValueDescriptor;", "removed": [ "\tprivate boolean dumpedStats;" ] }, { "added": [], "header": "@@ -344,7 +343,6 @@ abstract class NoRowsResultSetImpl implements ResultSet", "removed": [ "\t\tif (! dumpedStats)" ] } ] } ]
derby-DERBY-4204-6b03ead6
DERBY-4204: Statistics not collected on re-execution of statement This change enhances the DERBY-4204 regression test to verify that the XPLAIN table form of statistics collection is also re-collected on each statement re-execution. Without the DERBY-4204 fix, re-execution of DML statements was only resulting in a single row in the SYSXPLAIN_STATEMENTS table, while after the fix, the correct results of 5 rows are observed. This change should NOT be backported to 10.5 and earlier branches, because it depends on the XPLAIN functionality. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@774830 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4208-00c7a208
DERBY-4208 Parameters ? with OFFSET and/or FETCH This patch implements the use of dynamic parameters with OFFSET/FETCH and adds new tests. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@807337 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/execute/ResultSetFactory.java", "hunks": [ { "added": [ "\t * @param offsetMethod The OFFSET parameter was specified", "\t * @param fetchFirstMethod The FETCH FIRST/NEXT parameter was", "\t * specified" ], "header": "@@ -1620,8 +1620,9 @@ public interface ResultSetFactory {", "removed": [ "\t * @param offset The offset value (0 by default)", "\t * @param fetchFirst The fetch first value (-1 if not in use)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CursorNode.java", "hunks": [ { "added": [ "import java.sql.Types;" ], "header": "@@ -23,6 +23,7 @@ package\torg.apache.derby.impl.sql.compile;", "removed": [] }, { "added": [ "import org.apache.derby.iapi.types.DataTypeDescriptor;", "import org.apache.derby.iapi.types.TypeId;" ], "header": "@@ -35,6 +36,8 @@ import org.apache.derby.iapi.sql.dictionary.TableDescriptor;", "removed": [] }, { "added": [ "\tprivate ValueNode offset; // <result offset clause> value", "\tprivate ValueNode fetchFirst; // <fetch first clause> value" ], "header": "@@ -53,8 +56,8 @@ public class CursorNode extends DMLStatementNode", "removed": [ "\tprivate NumericConstantNode offset; // <result offset clause> value", "\tprivate NumericConstantNode fetchFirst; // <fetch first clause> value" ] }, { "added": [ "\t\tthis.offset = (ValueNode)offset;", "\t\tthis.fetchFirst = (ValueNode)fetchFirst;" ], "header": "@@ -106,8 +109,8 @@ public class CursorNode extends DMLStatementNode", "removed": [ "\t\tthis.offset = (NumericConstantNode)offset;", "\t\tthis.fetchFirst = (NumericConstantNode)fetchFirst;" ] }, { "added": [ "\t\tif (offset instanceof ConstantNode) {" ], "header": "@@ -362,7 +365,7 @@ public class CursorNode extends DMLStatementNode", "removed": [ "\t\tif (offset != null) {" ] }, { "added": [ "\t\t} else if (offset instanceof ParameterNode) {", "\t\t\toffset.", "\t\t\t\tsetType(new DataTypeDescriptor(", "\t\t\t\t\t\t\tTypeId.getBuiltInTypeId(Types.BIGINT),", 
"\t\t\t\t\t\t\tfalse /* ignored tho; ends up nullable,", "\t\t\t\t\t\t\t\t\t so we test for NULL at execute time */));", "", "\t\tif (fetchFirst instanceof ConstantNode) {" ], "header": "@@ -371,9 +374,16 @@ public class CursorNode extends DMLStatementNode", "removed": [ "\t\tif (fetchFirst != null) {" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/RowCountNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.SQLLongint;", "import org.apache.derby.iapi.reference.ClassName;" ], "header": "@@ -27,6 +27,8 @@ import org.apache.derby.iapi.services.compiler.MethodBuilder;", "removed": [] }, { "added": [ " boolean dynamicOffset = false;", " boolean dynamicFetchFirst = false;", " // arg4", " if (offset != null) {", " generateExprFun(acb, mb, offset);", " } else {", " mb.pushNull(ClassName.GeneratedMethod);", " }", " // arg5", " if (fetchFirst != null) {", " generateExprFun(acb, mb, fetchFirst);", " } else {", " mb.pushNull(ClassName.GeneratedMethod);", " }" ], "header": "@@ -93,19 +95,23 @@ public final class RowCountNode extends SingleChildResultSetNode", "removed": [ " // If OFFSET is not given, we pass in the default, i.e 0.", " long offsetVal =", " (offset != null) ?", " ((ConstantNode)offset).getValue().getLong() : 0;", " // If FETCH FIRST is not given, we pass in -1 to RowCountResultSet.", " long fetchFirstVal =", " (fetchFirst != null) ?", " ((ConstantNode)fetchFirst).getValue().getLong() : -1;", " mb.push(offsetVal); // arg4", " mb.push(fetchFirstVal); // arg5" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/GenericResultSetFactory.java", "hunks": [ { "added": [ "\t\tGeneratedMethod offsetMethod,", "\t\tGeneratedMethod fetchFirstMethod," ], "header": "@@ -1273,8 +1273,8 @@ public class GenericResultSetFactory implements ResultSetFactory", "removed": [ "\t\tlong offset,", "\t\tlong fetchFirst," ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/RowCountResultSet.java", "hunks": [ { "added": [ 
"import org.apache.derby.iapi.reference.SQLState;" ], "header": "@@ -21,6 +21,7 @@", "removed": [] }, { "added": [ "import org.apache.derby.iapi.services.loader.GeneratedMethod;", "import org.apache.derby.iapi.types.DataValueDescriptor;" ], "header": "@@ -28,7 +29,9 @@ import org.apache.derby.iapi.sql.execute.NoPutResultSet;", "removed": [] }, { "added": [ " private long offset;", " private long fetchFirst;", " final private GeneratedMethod offsetMethod;", " final private GeneratedMethod fetchFirstMethod;" ], "header": "@@ -49,8 +52,10 @@ class RowCountResultSet extends NoPutResultSetImpl", "removed": [ " final private long offset;", " final private long fetchFirst;" ] }, { "added": [ " * @param offsetMethod Generated method", " * @param fetchFirstMethod Generated method" ], "header": "@@ -72,8 +77,8 @@ class RowCountResultSet extends NoPutResultSetImpl", "removed": [ " * @param offset The offset value (0 by default)", " * @param fetchFirst The fetch first value (-1 if not in use)" ] }, { "added": [ " GeneratedMethod offsetMethod,", " GeneratedMethod fetchFirstMethod,", " throws StandardException {", "", " this.offsetMethod = offsetMethod;", " this.fetchFirstMethod = fetchFirstMethod;", "" ], "header": "@@ -84,21 +89,22 @@ class RowCountResultSet extends NoPutResultSetImpl", "removed": [ " long offset,", " long fetchFirst,", " throws StandardException", " {", " this.offset = offset;", " this.fetchFirst = fetchFirst;" ] } ] } ]
derby-DERBY-4212-06154a0a
DERBY-4212: Prepared statement with OFFSET/FETCH gives different results on subsequent execute Added a test case to OffsetFetchNextTest (disabled for now). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@771613 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4212-640d05d6
DERBY-4212 Prepared statement with OFFSET/FETCH gives different results on subsequent execute Patch DERBY-4212-2, which fixes this issue by resetting state variables in the close method of RowCountResultSet, and adds a new test case for this use case to OffsetFetchNextTest. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@772299 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/RowCountResultSet.java", "hunks": [ { "added": [ " final private long offset;", " final private long fetchFirst;", "", " /**", " * True if we haven't yet fetched any rows from this result set.", " * Will be reset on close so the result set is ready to reuse.", " */", " private boolean virginal;", "", " /**", " * Holds the number of rows returned so far in this round of using the", " * result set. Will be reset on close so the result set is ready to reuse.", " */", " private long rowsFetched;" ], "header": "@@ -49,8 +49,20 @@ class RowCountResultSet extends NoPutResultSetImpl", "removed": [ " private long offset;", " private long fetchFirst;" ] }, { "added": [ " virginal = true;", " rowsFetched = 0;" ], "header": "@@ -87,6 +99,8 @@ class RowCountResultSet extends NoPutResultSetImpl", "removed": [] }, { "added": [ " if (virginal && offset > 0) {", " // Only skip rows the first time around", " virginal = false;", "", " long offsetCtr = offset;", "", " offsetCtr--;", " if (result != null && offsetCtr >= 0) {", " if (fetchFirst != -1 && rowsFetched >= fetchFirst) {" ], "header": "@@ -159,24 +173,26 @@ class RowCountResultSet extends NoPutResultSetImpl", "removed": [ " if (offset > 0) {", " offset--;", " if (result != null && offset >= 0) {", "", " // only skip row first time", " offset = 0;", " if (fetchFirst != -1 && rowsSeen >= fetchFirst) {" ] }, { "added": [ " rowsFetched++;" ], "header": "@@ -185,6 +201,7 @@ class RowCountResultSet extends NoPutResultSetImpl", "removed": [] } ] } ]
derby-DERBY-4213-bbde6187
DERBY-4213; sttest needs to be adjusted to not run out of disk space this change modifies the test in 2 ways: - add in-place-compress so occasionally unused space gets released - adjust the code that picks rows for delete so more rows will get deleted git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@806699 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/system/sttest/utils/CompressTable.java", "hunks": [ { "added": [ "import java.sql.CallableStatement;" ], "header": "@@ -21,10 +21,10 @@", "removed": [ "import java.sql.Statement;" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/system/sttest/utils/Datatypes.java", "hunks": [ { "added": [ "\t\t\t.prepareStatement(\" select id from Datatypes where id >= ?\");" ], "header": "@@ -228,7 +228,7 @@ public class Datatypes {", "removed": [ "\t\t\t.prepareStatement(\" select id from Datatypes where id = ?\");" ] }, { "added": [ "\t\t\t\t\t//if the id is null, find another row." ], "header": "@@ -245,8 +245,8 @@ public class Datatypes {", "removed": [ "\t\t\t\t\t//keep trying till we get a good one" ] }, { "added": [ "\t\t\t\telse {", "\t\t\t\t\t// the random number is higher than the", "\t\t\t\t\t// highest id value in the database. ", "\t\t\t\t\t// delete would fail. Pick another.", "\t\t\t\t\ti--;", "\t\t\t\t\tcontinue;", "\t\t\t\t}" ], "header": "@@ -254,6 +254,13 @@ public class Datatypes {", "removed": [] } ] } ]
derby-DERBY-4214-a61fd536
DERBY-4214: Fix signature of CLOBGETSUBSTRING during hard-upgrade to 10.6. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@787310 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4215-b05fa6ea
DERBY-4215: remove bad permissions tuple during 10.6 upgrade. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@787754 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/catalog/DD_Version.java", "hunks": [ { "added": [ " //", " // Change the return type of SYSIBM.CLOBGETSUBSTRING if necessary. See", " //" ], "header": "@@ -451,11 +451,13 @@ public\tclass DD_Version implements\tFormatable", "removed": [ " // change the return type of SYSIBM.CLOBGETSUBSTRING if necessary. See" ] } ] } ]
derby-DERBY-4218-c5d9be33
DERBY-4218: "Table/View 'MAX_SCAN' does not exist" in IndexSplitDeadlockTest Wait for all threads to finish before dropping tables in tearDown(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@772534 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4223-a39c84c1
DERBY-4223 Provide the ability to use properties with ij.runScript() adds a new runScript method that allows use of system properties with runScript. DERBY-4217 (partial) adds use derby.tests.port system property when initializing TestConfiguration. Contributed by Tiago Espinha (tiago at espinhas dot net) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@781200 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [ "", " /**", " * Return a decorator for the passed in tests that sets the", " * configuration for the client to be Derby's JDBC client", " * and to start the network server at setUp.", " * <BR>", " * The database configuration (name etc.) is based upon", " * the previous configuration.", " * <BR>", " * The previous TestConfiguration is restored at tearDown and", " * the network server is shutdown.", " * @param suite the suite to decorate", " */", " public static Test clientServerDecoratorWithPort(Test suite, int port)", " {", " Test test = new NetworkServerTestSetup(suite, false);", "", " return existingServerDecorator(test,\"localhost\",port);", " }", "" ], "header": "@@ -454,6 +454,26 @@ public class TestConfiguration {", "removed": [] } ] }, { "file": "java/tools/org/apache/derby/tools/ij.java", "hunks": [ { "added": [ "\t return ij.runScript(conn, sqlIn, inputEncoding, sqlOut, outputEncoding,false);", " }", "", " /**", " * Run a SQL script from an InputStream and write", " * the resulting output to the provided PrintStream.", " * SQL commands are separated by a semi-colon ';' character.", " *", " * @param conn Connection to be used as the script's default connection.", " * @param sqlIn InputStream for the script.", " * @param inputEncoding Encoding of the script.", " * @param sqlOut OutputStream for the script's output", " * @param outputEncoding Output encoding to use.", " * @param loadSystemProperties Whether to use the system properties.", " * @return Number of SQLExceptions thrown during the execution, -1 if not known.", " * @throws UnsupportedEncodingException", " */", " public static int runScript(", "\t\t Connection conn,", "\t\t InputStream sqlIn,", "\t\t String inputEncoding,", "\t\t OutputStream sqlOut,", "\t\t String outputEncoding,", " boolean loadSystemProperties)", "\t\t throws UnsupportedEncodingException", " {", " LocalizedOutput lo =", " 
outputEncoding == null ?", " LocalizedResource.getInstance().", " getNewOutput(sqlOut)", " :", " LocalizedResource.getInstance().", " Main ijE = new Main(false);", " LocalizedInput li = LocalizedResource.getInstance().", " getNewEncodedInput(sqlIn, inputEncoding);", "", " utilMain um = ijE.getutilMain(1, lo, loadSystemProperties);", "", " return um.goScript(conn, li);", " }" ], "header": "@@ -80,23 +80,49 @@ public class ij {", "removed": [ "\t LocalizedOutput lo = ", "\t\t outputEncoding == null ?", "\t\t\t\t LocalizedResource.getInstance().", "\t\t getNewOutput(sqlOut)", "\t : ", "\t\t LocalizedResource.getInstance().", "\t Main ijE = new Main(false);", "\t ", "\t LocalizedInput li = LocalizedResource.getInstance().", "\t getNewEncodedInput(sqlIn, inputEncoding);", "\t ", "\t utilMain um = ijE.getutilMain(1, lo);", "\t return um.goScript(conn, li);", " }" ] } ] } ]