id
stringlengths 22
25
| commit_message
stringlengths 137
6.96k
| diffs
listlengths 0
63
|
|---|---|---|
derby-DERBY-3270-392ac57f
|
DERBY-3270 Delayed (on-demand) creation of current user schema makes select from view belonging to other schema fail.
Fixed with patch derby-3270-dhw-3. This temporarily sets the default
current schema back to what it was at the time of definition of the
schema. This need also exists in other places, so a general mechanism
to do this was factored in and used in all such places.
New testcase added.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@735084 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/CompilerContextImpl.java",
"hunks": [
{
"added": [
""
],
"header": "@@ -1,3 +1,4 @@",
"removed": []
},
{
"added": [
"\t\tdefaultSchemaStack = null;"
],
"header": "@@ -157,6 +158,7 @@ public class CompilerContextImpl extends ContextImpl",
"removed": []
},
{
"added": [
"\t/**",
"\t * @see CompilerContext#pushCompilationSchema",
"\t */",
"\tpublic void pushCompilationSchema(SchemaDescriptor sd)",
"\t{",
"\t\tif (defaultSchemaStack == null) {",
"\t\t\tdefaultSchemaStack = new ArrayList(2);",
"\t\t}",
"",
"\t\tdefaultSchemaStack.add(defaultSchemaStack.size(),",
"\t\t\t\t\t\t\t getCompilationSchema());",
"\t\tsetCompilationSchema(sd);",
"\t}",
"",
"\t/**",
"\t * @see CompilerContext#popCompilationSchema",
"\t */",
"\tpublic void popCompilationSchema()",
"\t{",
"\t\tSchemaDescriptor sd =",
"\t\t\t(SchemaDescriptor)defaultSchemaStack.remove(",
"\t\t\t\tdefaultSchemaStack.size() - 1);",
"\t\tsetCompilationSchema(sd);",
"\t}",
""
],
"header": "@@ -553,6 +555,31 @@ public class CompilerContextImpl extends ContextImpl",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromBaseTable.java",
"hunks": [
{
"added": [
"import org.apache.derby.impl.sql.compile.FromSubquery;"
],
"header": "@@ -78,6 +78,7 @@ import org.apache.derby.iapi.types.DataValueDescriptor;",
"removed": []
},
{
"added": [
"\t\t\tFromSubquery fsq;"
],
"header": "@@ -2204,12 +2205,11 @@ public class FromBaseTable extends FromTable",
"removed": [
"\t\t\tFromTable\t\t\t\t\tfsq;",
"\t\t\tSchemaDescriptor\t\t\tprevCompSchema;"
]
},
{
"added": [
"\t\t\tcompilerContext.pushCompilationSchema(compSchema);"
],
"header": "@@ -2224,7 +2224,7 @@ public class FromBaseTable extends FromTable",
"removed": [
"\t\t\tprevCompSchema = compilerContext.setCompilationSchema(compSchema);"
]
},
{
"added": [
"\t\t\t\tfsq = (FromSubquery) getNodeFactory().getNode("
],
"header": "@@ -2269,7 +2269,7 @@ public class FromBaseTable extends FromTable",
"removed": [
"\t\t\t\tfsq = (FromTable) getNodeFactory().getNode("
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromSubquery.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.compile.CompilerContext;",
"import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;"
],
"header": "@@ -24,7 +24,9 @@ package\torg.apache.derby.impl.sql.compile;",
"removed": []
},
{
"added": [
"\t/**",
"\t * DERBY-3270: If this subquery represents an expanded view, this holds the",
"\t * current compilation schema at view definition time.",
"\t */",
"\tprivate SchemaDescriptor origCompilationSchema = null;",
""
],
"header": "@@ -45,6 +47,12 @@ public class FromSubquery extends FromTable",
"removed": []
},
{
"added": [
"",
"\t\tCompilerContext compilerContext = getCompilerContext();",
"",
"\t\tif (origCompilationSchema != null) {",
"\t\t\t// View expansion needs the definition time schema",
"\t\t\tcompilerContext.pushCompilationSchema(origCompilationSchema);",
"\t\t}",
"",
"\t\ttry {",
"\t\t\tsubquery.bindExpressions(nestedFromList);",
"\t\t\tsubquery.bindResultColumns(nestedFromList);",
"\t\t} finally {",
"\t\t\tif (origCompilationSchema != null) {",
"\t\t\t\tcompilerContext.popCompilationSchema();",
"\t\t\t}",
"\t\t}"
],
"header": "@@ -213,8 +221,22 @@ public class FromSubquery extends FromTable",
"removed": [
"\t\tsubquery.bindExpressions(nestedFromList);",
"\t\tsubquery.bindResultColumns(nestedFromList);"
]
}
]
}
] |
derby-DERBY-3279-20bd3c03
|
DERBY-3279: Add logic for execution-time sorting of IN list values
into DESCENDING order when required. This is necessary when doing
IN-list "multi-probing" on some column C for which a DESC sort has
been eliminated (by the optimizer). In such a case the row values
will come back in the order in which they appear in the IN List,
which means the IN list values must themselves be sorted in the
correct (descending) order.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@616126 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/execute/ResultSetFactory.java",
"hunks": [
{
"added": [
"\t\t@param sortRequired Which type of sort we need for the values",
"\t\t\t(ascending, descending, or none)."
],
"header": "@@ -1002,8 +1002,8 @@ public interface ResultSetFactory {",
"removed": [
"\t\t@param probeValsAreSorted Whether or not the values in probeVals are",
"\t\t\tsorted."
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/BaseTableNumbersVisitor.java",
"hunks": [
{
"added": [
"\t/* Column number of the ColumnReference or ResultColumn",
"\t * for which we most recently found a base table number. ",
"\t * In cases where this visitor is only expected to find",
"\t * a single base table number, this field is useful for",
"\t * determining what the column position w.r.t. the found",
"\t * base table was.",
"\t */",
"\tprivate int columnNumber;",
""
],
"header": "@@ -41,6 +41,15 @@ public class BaseTableNumbersVisitor implements Visitor",
"removed": []
},
{
"added": [
"\t\tcolumnNumber = -1;"
],
"header": "@@ -50,6 +59,7 @@ public class BaseTableNumbersVisitor implements Visitor",
"removed": []
},
{
"added": [
"\t/**",
"\t * Reset the state of this visitor.",
"\t */",
"\tprotected void reset()",
"\t{",
"\t\ttableMap.clearAll();",
"\t\tcolumnNumber = -1;",
"\t}",
"",
"\t/**",
"\t * Retrieve the position of the ColumnReference or",
"\t * ResultColumn for which we most recently found a base",
"\t * table number.",
"\t */",
"\tprotected int getColumnNumber()",
"\t{",
"\t\treturn columnNumber;",
"\t}",
""
],
"header": "@@ -63,6 +73,25 @@ public class BaseTableNumbersVisitor implements Visitor",
"removed": []
},
{
"added": [
"\t\t\trc = (ResultColumn)node;"
],
"header": "@@ -96,7 +125,7 @@ public class BaseTableNumbersVisitor implements Visitor",
"removed": [
"\t\t\trc = (ResultColumn)rc;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromBaseTable.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.compile.C_NodeTypes;"
],
"header": "@@ -40,6 +40,7 @@ import org.apache.derby.iapi.services.sanity.SanityManager;",
"removed": []
},
{
"added": [
"import org.apache.derby.iapi.sql.compile.RequiredRowOrdering;"
],
"header": "@@ -48,8 +49,8 @@ import org.apache.derby.iapi.sql.compile.Optimizable;",
"removed": [
"import org.apache.derby.iapi.sql.compile.C_NodeTypes;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/InListOperatorNode.java",
"hunks": [
{
"added": [
"\tprivate boolean sortDescending;"
],
"header": "@@ -52,6 +52,7 @@ import java.lang.reflect.Modifier;",
"removed": []
},
{
"added": [
"\t\tif (sortDescending)",
"\t\t\tilon.markSortDescending();",
""
],
"header": "@@ -103,6 +104,9 @@ public final class InListOperatorNode extends BinaryListOperatorNode",
"removed": []
},
{
"added": [
"\t/**",
"\t * Indicate that the IN-list values for this node must be sorted",
"\t * in DESCENDING order. This only applies to in-list \"multi-probing\",",
"\t * where the rows are processed in the order of the IN list elements",
"\t * themselves. In that case, any requirement to sort the rows in",
"\t * descending order means that the values in the IN list have to",
"\t * be sorted in descending order, as well.",
"\t */",
"\tprotected void markSortDescending()",
"\t{",
"\t\tsortDescending = true;",
"\t}",
""
],
"header": "@@ -738,6 +742,19 @@ public final class InListOperatorNode extends BinaryListOperatorNode",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/PredicateList.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.compile.RequiredRowOrdering;",
"import org.apache.derby.iapi.sql.compile.RowOrdering;"
],
"header": "@@ -36,6 +36,8 @@ import org.apache.derby.iapi.sql.compile.ExpressionClassBuilderInterface;",
"removed": []
},
{
"added": [
"",
"\t\t\tif (ilon.sortDescending())",
"\t\t\t\tmb.push(RowOrdering.DESCENDING);",
"\t\t\telse if (!ilon.isOrdered())",
"\t\t\t{",
"\t\t\t\t/* If there is no requirement to sort descending and the",
"\t\t\t\t * IN list values have not already been sorted, then we",
"\t\t\t\t * sort them in ascending order at execution time.",
"\t\t\t\t */",
"\t\t\t\tmb.push(RowOrdering.ASCENDING);",
"\t\t\t}",
"\t\t\telse",
"\t\t\t{",
"\t\t\t\t/* DONTCARE here means we don't have to sort the IN",
"\t\t\t\t * values at execution time because we already did",
"\t\t\t\t * it as part of compilation (esp. preprocessing).",
"\t\t\t\t * This can only be the case if all values in the IN",
"\t\t\t\t * list are literals (as opposed to parameters).",
"\t\t\t\t */",
"\t\t\t\tmb.push(RowOrdering.DONTCARE);",
"\t\t\t}",
""
],
"header": "@@ -2919,7 +2921,28 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre",
"removed": [
"\t\t\tmb.push(ilon.isOrdered());"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/GenericResultSetFactory.java",
"hunks": [
{
"added": [
"\t\t\t\t\t\t\t\t\tint sortRequired,"
],
"header": "@@ -735,7 +735,7 @@ public class GenericResultSetFactory implements ResultSetFactory",
"removed": [
"\t\t\t\t\t\t\t\t\tboolean probeValsAreSorted,"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MultiProbeTableScanResultSet.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.compile.RowOrdering;"
],
"header": "@@ -31,6 +31,7 @@ import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;",
"removed": []
},
{
"added": [
" * Indicator as to which type of sort we need: ASCENDING, DESCENDING,",
" * or NONE (NONE is represented by \"RowOrdering.DONTCARE\" and is used",
" * for cases where all necessary sorting occurred at compilation time).",
" private int sortRequired;"
],
"header": "@@ -81,11 +82,11 @@ class MultiProbeTableScanResultSet extends TableScanResultSet",
"removed": [
" * Whether or not we need to sort the values. If all values were",
" * specified as literals (as opposed to parameters) then we did the",
" * sort at compile time and so we do not need to do it here.",
" private boolean needSort;"
]
},
{
"added": [
" int sortRequired,"
],
"header": "@@ -103,7 +104,7 @@ class MultiProbeTableScanResultSet extends TableScanResultSet",
"removed": [
" boolean probeValsAreSorted,"
]
},
{
"added": [
" this.sortRequired = sortRequired;"
],
"header": "@@ -157,7 +158,7 @@ class MultiProbeTableScanResultSet extends TableScanResultSet",
"removed": [
" this.needSort = !probeValsAreSorted;"
]
},
{
"added": [
" if (sortRequired == RowOrdering.DONTCARE)",
" {",
" /* DONTCARE really means that the values are already sorted",
" * in ascending order, and that's good enough.",
" */",
" probeValues = origProbeValues;",
" }",
" else"
],
"header": "@@ -175,7 +176,14 @@ class MultiProbeTableScanResultSet extends TableScanResultSet",
"removed": [
" if (needSort)"
]
},
{
"added": [
" if (sortRequired == RowOrdering.ASCENDING)",
" java.util.Arrays.sort(pVals);",
" else",
" {",
" // Sort the values in DESCENDING order.",
" java.util.Arrays.sort(",
" pVals, java.util.Collections.reverseOrder());",
" }",
""
],
"header": "@@ -190,11 +198,17 @@ class MultiProbeTableScanResultSet extends TableScanResultSet",
"removed": [
" java.util.Arrays.sort(pVals);",
" else",
" probeValues = origProbeValues;"
]
}
]
}
] |
derby-DERBY-3279-50567fe2
|
DERBY-3279: Add logic for execution-time sorting of IN list values
into DESCENDING order when required.
This is a follow-up patch to add an implementation of the
adjustForSortElimination(RequiredRowOrdering) to IndexToBaseRowNode,
which was missing from the previous commit (svn # 616126). This
commit also adds more test cases to InListMultiProbeTest.java.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@617548 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/IndexToBaseRowNode.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.compile.RequiredRowOrdering;"
],
"header": "@@ -26,6 +26,7 @@ import org.apache.derby.iapi.services.context.ContextManager;",
"removed": []
}
]
},
{
"file": "java/testing/org/apache/derbyTesting/junit/JDBC.java",
"hunks": [
{
"added": [
"import java.util.BitSet;"
],
"header": "@@ -23,6 +23,7 @@ import java.io.IOException;",
"removed": []
},
{
"added": [
" /**",
" * Similar to assertFullResultSet(...) above, except that this",
" * method takes a BitSet and checks the received expectedRows",
" * against the columns referenced by the BitSet. So the assumption",
" * here is that expectedRows will only have as many columns as",
" * there are \"true\" bits in the received BitSet.",
" *",
" * This method is useful when we expect there to be a specific",
" * ordering on some column OC in the result set, but do not care",
" * about the ordering of the non-OC columns when OC is the",
" * same across rows. Ex. If we have the following results with",
" * an expected ordering on column J:",
" *",
" * I J",
" * - -",
" * a 1",
" * b 1",
" * c 2",
" * c 2",
" *",
" * Then this method allows us to verify that J is sorted as",
" * \"1, 1, 2, 2\" without having to worry about whether or not",
" * (a,1) comes before (b,1). The caller would simply pass in",
" * a BitSet whose content was {1} and an expectedRows array",
" * of {{\"1\"},{\"1\"},{\"2\"},{\"2\"}}.",
" *",
" * For now this method always does comparisons with",
" * \"asTrimmedStrings\" set to true, and always closes",
" * the result set.",
" */",
" public static void assertPartialResultSet(ResultSet rs,",
" Object [][] expectedRows, BitSet colsToCheck)",
" throws SQLException",
" {",
" int rows;",
"",
" // Assert that we have the right number of columns. If we expect an",
" // empty result set, the expected column count is unknown, so don't",
" // check.",
" if (expectedRows.length > 0) {",
" Assert.assertEquals(\"Unexpected column count:\",",
" expectedRows[0].length, colsToCheck.cardinality());",
" }",
"",
" for (rows = 0; rs.next(); rows++)",
" {",
" /* If we have more actual rows than expected rows, don't",
" * try to assert the row. Instead just keep iterating",
" * to see exactly how many rows the actual result set has.",
" */",
" if (rows < expectedRows.length)",
" {",
" assertRowInResultSet(rs, rows + 1,",
" expectedRows[rows], true, colsToCheck);",
" }",
" }",
"",
" rs.close();",
"",
" // And finally, assert the row count.",
" Assert.assertEquals(\"Unexpected row count:\", expectedRows.length, rows);",
" }",
""
],
"header": "@@ -884,6 +885,69 @@ public class JDBC {",
"removed": []
},
{
"added": [
" assertRowInResultSet(",
" rs, rowNum, expectedRow, asTrimmedStrings, (BitSet)null);",
" }",
"",
" /**",
" * See assertRowInResultSet(...) above.",
" *",
" * @param BitSet colsToCheck If non-null then for every bit b",
" * that is set in colsToCheck, we'll compare the (b+1)-th column",
" * of the received result set's current row to the i-th column",
" * of expectedRow, where 0 <= i < # bits set in colsToCheck.",
" * So if colsToCheck is { 0, 3 } then expectedRow should have",
" * two objects and we'll check that:",
" *",
" * expectedRow[0].equals(rs.getXXX(1));",
" * expectedRow[1].equals(rs.getXXX(4));",
" *",
" * If colsToCheck is null then the (i+1)-th column in the",
" * result set is compared to the i-th column in expectedRow,",
" * where 0 <= i < expectedRow.length.",
" */",
" private static void assertRowInResultSet(ResultSet rs,",
" int rowNum, Object [] expectedRow, boolean asTrimmedStrings,",
" BitSet colsToCheck) throws SQLException",
" {",
" int cPos = 0;",
" cPos = (colsToCheck == null)",
" ? (i+1)",
" : colsToCheck.nextSetBit(cPos) + 1;",
""
],
"header": "@@ -922,9 +986,39 @@ public class JDBC {",
"removed": []
},
{
"added": [
" && (rsmd.getColumnType(cPos) == Types.SMALLINT))",
" obj = (rs.getShort(cPos) == 0) ? \"false\" : \"true\";",
" obj = rs.getString(cPos);",
" obj = rs.getString(cPos);"
],
"header": "@@ -942,18 +1036,18 @@ public class JDBC {",
"removed": [
" && (rsmd.getColumnType(i+1) == Types.SMALLINT))",
" obj = (rs.getShort(i+1) == 0) ? \"false\" : \"true\";",
" obj = rs.getString(i+1);",
" obj = rs.getString(i+1);"
]
},
{
"added": [
" obj = rs.getObject(cPos);"
],
"header": "@@ -963,7 +1057,7 @@ public class JDBC {",
"removed": [
" obj = rs.getObject(i+1);"
]
},
{
"added": [
" rsmd.getColumnName(cPos) + \"', row \" + rowNum +",
" assertResultColumnNullable(rsmd, cPos);"
],
"header": "@@ -981,13 +1075,13 @@ public class JDBC {",
"removed": [
" rsmd.getColumnName(i+1) + \"', row \" + rowNum +",
" assertResultColumnNullable(rsmd, i+1);"
]
}
]
}
] |
derby-DERBY-3288-00679884
|
DERBY-3288: Fix optimizer dependency tracking logic so that it
correctly enforces join order dependencies between Optimizables,
even when plan "short-circuiting" occurs. This patch also fixes
a bug in FromVTI's referenced table map (which affects dependencies)
and does a slight refactoring of the "pull Optimizable" code for
the sake of clarity. And finally, it adds an appropriate test
case to the existing lang/subqueryFlattening.sql test.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618841 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/OptimizerImpl.java",
"hunks": [
{
"added": [
"\t\t\tint nextOptimizable = proposedJoinOrder[joinPosition] + 1;",
"\t\t\tif (proposedJoinOrder[joinPosition] >= 0)",
"\t\t\t{",
"\t\t\t\t/* We are either going to try another table at the current",
"\t\t\t\t * join order position, or we have exhausted all the tables",
"\t\t\t\t * at the current join order position. In either case, we",
"\t\t\t\t * need to pull the table at the current join order position",
"\t\t\t\t * and remove it from the join order. Do this BEFORE we",
"\t\t\t\t * search for the next optimizable so that assignedTableMap,",
"\t\t\t\t * which is updated to reflect the PULL, has the correct",
"\t\t\t\t * information for enforcing join order dependencies.",
"\t\t\t\t * DERBY-3288.",
"\t\t\t\t */",
"\t\t\t\tpullOptimizableFromJoinOrder();",
"\t\t\t}"
],
"header": "@@ -596,7 +596,21 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\tint nextOptimizable = 0;"
]
},
{
"added": [],
"header": "@@ -737,8 +751,6 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\t\tnextOptimizable = proposedJoinOrder[joinPosition] + 1;",
""
]
},
{
"added": [
"\t\t\t\t\t/* No need to check the dependencies if the optimizable",
"\t\t\t\t\t * is already in the join order--because we should have",
"\t\t\t\t\t * checked its dependencies before putting it there.",
"\t\t\t\t\tif (found)",
"\t\t\t\t\t\tif (SanityManager.DEBUG)",
"\t\t\t\t\t\t\t// Doesn't hurt to check in SANE mode, though...",
"\t\t\t\t\t\t\tif ((nextOptimizable < numOptimizables) &&",
"\t\t\t\t\t\t\t\t!joinOrderMeetsDependencies(nextOptimizable))",
"\t\t\t\t\t\t\t\tSanityManager.THROWASSERT(",
"\t\t\t\t\t\t\t\t\t\"Found optimizable '\" + nextOptimizable +",
"\t\t\t\t\t\t\t\t\t\"' in current join order even though \" +",
"\t\t\t\t\t\t\t\t\t\"its dependencies were NOT satisfied.\");",
"\t\t\t\t\t\tcontinue;",
"\t\t\t\t\t/* Check to make sure that all of the next optimizable's",
"\t\t\t\t\t * dependencies have been satisfied.",
"\t\t\t\t\t */",
"\t\t\t\t\tif ((nextOptimizable < numOptimizables) &&",
"\t\t\t\t\t\t!joinOrderMeetsDependencies(nextOptimizable))",
"\t\t\t\t\t\tif (optimizerTrace)",
"\t\t\t\t\t\t\ttrace(SKIPPING_JOIN_ORDER, nextOptimizable, 0, 0.0, null);",
"\t\t\t\t\t\t/*",
"\t\t\t\t\t\t** If this is a user specified join order then it is illegal.",
"\t\t\t\t\t\t*/",
"\t\t\t\t\t\tif ( ! optimizableList.optimizeJoinOrder())",
"\t\t\t\t\t\t\tif (optimizerTrace)",
"\t\t\t\t\t\t\t\ttrace(ILLEGAL_USER_JOIN_ORDER, 0, 0, 0.0, null);",
"\t\t\t\t\t\t\tthrow StandardException.newException(",
"\t\t\t\t\t\t\t\tSQLState.LANG_ILLEGAL_FORCED_JOIN_ORDER);",
"\t\t\t\t\t\t}",
"\t\t\t\t\t\tcontinue;",
"\t\t\t\t\tbreak;",
"\t\t\t\t}"
],
"header": "@@ -755,281 +767,58 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\t\t\t/* Check to make sure that all of the next optimizable's",
"\t\t\t\t\t * dependencies have been satisfied.",
"\t\t\t\t\tif (nextOptimizable < numOptimizables)",
"\t\t\t\t\t\tOptimizable nextOpt =",
"\t\t\t\t\t\t\t\toptimizableList.getOptimizable(nextOptimizable);",
"\t\t\t\t\t\tif (! (nextOpt.legalJoinOrder(assignedTableMap)))",
"\t\t\t\t\t\t\tif (optimizerTrace)",
"\t\t\t\t\t\t\t\ttrace(SKIPPING_JOIN_ORDER, nextOptimizable, 0, 0.0, null);",
"",
"\t\t\t\t\t\t\t/*",
"\t\t\t\t\t\t\t** If this is a user specified join order then it is illegal.",
"\t\t\t\t\t\t\t*/",
"\t\t\t\t\t\t\tif ( ! optimizableList.optimizeJoinOrder())",
"\t\t\t\t\t\t\t{",
"\t\t\t\t\t\t\t\tif (optimizerTrace)",
"\t\t\t\t\t\t\t\t{",
"\t\t\t\t\t\t\t\t\ttrace(ILLEGAL_USER_JOIN_ORDER, 0, 0, 0.0, null);",
"\t\t\t\t\t\t\t\t}",
"",
"\t\t\t\t\t\t\t\tthrow StandardException.newException(SQLState.LANG_ILLEGAL_FORCED_JOIN_ORDER);",
"\t\t\t\t\t\t\t}",
"\t\t\t\t\t\t\tcontinue;",
"\t\t\t\t\t}",
"",
"\t\t\t\t\tif (! found)",
"\t\t\t\t\t{",
"\t\t\t\t\t\tbreak;",
"\t\t\t\t\t}",
"\t\t\t\t}",
"",
"\t\t\t}",
"",
"\t\t\t/*",
"\t\t\t** We are going to try an optimizable at the current join order",
"\t\t\t** position. Is there one already at that position?",
"\t\t\t*/",
"\t\t\tif (proposedJoinOrder[joinPosition] >= 0)",
"\t\t\t{",
"\t\t\t\t/*",
"\t\t\t\t** We are either going to try another table at the current",
"\t\t\t\t** join order position, or we have exhausted all the tables",
"\t\t\t\t** at the current join order position. In either case, we",
"\t\t\t\t** need to pull the table at the current join order position",
"\t\t\t\t** and remove it from the join order.",
"\t\t\t\t*/",
"\t\t\t\tOptimizable pullMe =",
"\t\t\t\t\toptimizableList.getOptimizable(",
"\t\t\t\t\t\t\t\t\t\t\tproposedJoinOrder[joinPosition]);",
"",
"\t\t\t\t/*",
"\t\t\t\t** Subtract the cost estimate of the optimizable being",
"\t\t\t\t** removed from the total cost estimate.",
"\t\t\t\t**",
"\t\t\t\t** The total cost is the sum of all the costs, but the total",
"\t\t\t\t** number of rows is the number of rows returned by the",
"\t\t\t\t** innermost optimizable.",
"\t\t\t\t*/",
"\t\t\t\tdouble prevRowCount;",
"\t\t\t\tdouble prevSingleScanRowCount;",
"\t\t\t\tint prevPosition = 0;",
"\t\t\t\tif (joinPosition == 0)",
"\t\t\t\t{",
"\t\t\t\t\tprevRowCount = outermostCostEstimate.rowCount();",
"\t\t\t\t\tprevSingleScanRowCount = outermostCostEstimate.singleScanRowCount();",
"\t\t\t\t}",
"\t\t\t\telse",
"\t\t\t\t{",
"\t\t\t\t\tprevPosition = proposedJoinOrder[joinPosition - 1];",
"\t\t\t\t\tCostEstimate localCE = ",
"\t\t\t\t\t\toptimizableList.",
"\t\t\t\t\t\t\tgetOptimizable(prevPosition).",
"\t\t\t\t\t\t\t\tgetBestAccessPath().",
"\t\t\t\t\t\t\t\t\tgetCostEstimate();",
"\t\t\t\t\tprevRowCount = localCE.rowCount();",
"\t\t\t\t\tprevSingleScanRowCount = localCE.singleScanRowCount();",
"\t\t\t\t}",
"",
"\t\t\t\t/*",
"\t\t\t\t** If there is no feasible join order, the cost estimate",
"\t\t\t\t** in the best access path may never have been set.",
"\t\t\t\t** In this case, do not subtract anything from the",
"\t\t\t\t** current cost, since nothing was added to the current",
"\t\t\t\t** cost.",
"\t\t\t\t*/",
"\t\t\t\tdouble newCost = currentCost.getEstimatedCost();",
"\t\t\t\tdouble pullCost = 0.0;",
"\t\t\t\tCostEstimate pullCostEstimate =",
"\t\t\t\t\t\t\t\tpullMe.getBestAccessPath().getCostEstimate();",
"\t\t\t\tif (pullCostEstimate != null)",
"\t\t\t\t{",
"\t\t\t\t\tpullCost = pullCostEstimate.getEstimatedCost();",
"",
"\t\t\t\t\tnewCost -= pullCost;",
"",
"\t\t\t\t\t/*",
"\t\t\t\t\t** It's possible for newCost to go negative here due to",
"\t\t\t\t\t** loss of precision--but that should ONLY happen if the",
"\t\t\t\t\t** optimizable we just pulled was at position 0. If we",
"\t\t\t\t\t** have a newCost that is <= 0 at any other time, then",
"\t\t\t\t\t** it's the result of a different kind of precision loss--",
"\t\t\t\t\t** namely, the estimated cost of pullMe was so large that",
"\t\t\t\t\t** we lost the precision of the accumulated cost as it",
"\t\t\t\t\t** existed prior to pullMe. Then when we subtracted",
"\t\t\t\t\t** pullMe's cost out, we ended up setting newCost to zero.",
"\t\t\t\t\t** That's an unfortunate side effect of optimizer cost",
"\t\t\t\t\t** estimates that grow too large. If that's what happened",
"\t\t\t\t\t** here,try to make some sense of things by adding up costs",
"\t\t\t\t\t** as they existed prior to pullMe...",
"\t\t\t\t\t*/",
"\t\t\t\t\tif (newCost <= 0.0)",
"\t\t\t\t\t{",
"\t\t\t\t\t\tif (joinPosition == 0)",
"\t\t\t\t\t\t\tnewCost = 0.0;",
"\t\t\t\t\t\telse",
"\t\t\t\t\t\t\tnewCost = recoverCostFromProposedJoinOrder(false);",
"\t\t\t\t\t}",
"\t\t\t\t}",
"\t\t\t\t/* If we are choosing a new outer table, then",
"\t\t\t\t * we rest the starting cost to the outermostCost.",
"\t\t\t\t * (Thus avoiding any problems with floating point",
"\t\t\t\t * accuracy and going negative.)",
"\t\t\t\t */",
"\t\t\t\tif (joinPosition == 0)",
"\t\t\t\t{",
"\t\t\t\t\tif (outermostCostEstimate != null)",
"\t\t\t\t\t{",
"\t\t\t\t\t\tnewCost = outermostCostEstimate.getEstimatedCost();",
"\t\t\t\t\t}",
"\t\t\t\t\telse",
"\t\t\t\t\t{",
"\t\t\t\t\t\tnewCost = 0.0;",
"\t\t\t\t}",
"\t\t\t\tcurrentCost.setCost(",
"\t\t\t\t\tnewCost,",
"\t\t\t\t\tprevRowCount,",
"\t\t\t\t\tprevSingleScanRowCount);",
"\t\t\t\t",
"\t\t\t\t/*",
"\t\t\t\t** Subtract from the sort avoidance cost if there is a",
"\t\t\t\t** required row ordering.",
"\t\t\t\t**",
"\t\t\t\t** NOTE: It is not necessary here to check whether the",
"\t\t\t\t** best cost was ever set for the sort avoidance path,",
"\t\t\t\t** because it considerSortAvoidancePath() would not be",
"\t\t\t\t** set if there cost were not set.",
"\t\t\t\t*/",
"\t\t\t\tif (requiredRowOrdering != null)",
"\t\t\t\t{",
"\t\t\t\t\tif (pullMe.considerSortAvoidancePath())",
"\t\t\t\t\t\tAccessPath ap = pullMe.getBestSortAvoidancePath();",
"\t\t\t\t\t\tdouble\t prevEstimatedCost = 0.0d;",
"",
"\t\t\t\t\t\t/*",
"\t\t\t\t\t\t** Subtract the sort avoidance cost estimate of the",
"\t\t\t\t\t\t** optimizable being removed from the total sort",
"\t\t\t\t\t\t** avoidance cost estimate.",
"\t\t\t\t\t\t**",
"\t\t\t\t\t\t** The total cost is the sum of all the costs, but the",
"\t\t\t\t\t\t** total number of rows is the number of rows returned",
"\t\t\t\t\t\t** by the innermost optimizable.",
"\t\t\t\t\t\t*/",
"\t\t\t\t\t\tif (joinPosition == 0)",
"\t\t\t\t\t\t{",
"\t\t\t\t\t\t\tprevRowCount = outermostCostEstimate.rowCount();",
"\t\t\t\t\t\t\tprevSingleScanRowCount = outermostCostEstimate.singleScanRowCount();",
"\t\t\t\t\t\t\t/* If we are choosing a new outer table, then",
"\t\t\t\t\t\t\t * we rest the starting cost to the outermostCost.",
"\t\t\t\t\t\t\t * (Thus avoiding any problems with floating point",
"\t\t\t\t\t\t\t * accuracy and going negative.)",
"\t\t\t\t\t\t\t */",
"\t\t\t\t\t\t\tprevEstimatedCost = outermostCostEstimate.getEstimatedCost();",
"\t\t\t\t\t\t}",
"\t\t\t\t\t\telse",
"\t\t\t\t\t\t\tCostEstimate localCE = ",
"\t\t\t\t\t\t\t\toptimizableList.",
"\t\t\t\t\t\t\t\t\tgetOptimizable(prevPosition).",
"\t\t\t\t\t\t\t\t\t\tgetBestSortAvoidancePath().",
"\t\t\t\t\t\t\t\t\t\t\tgetCostEstimate();",
"\t\t\t\t\t\t\tprevRowCount = localCE.rowCount();",
"\t\t\t\t\t\t\tprevSingleScanRowCount = localCE.singleScanRowCount();",
"\t\t\t\t\t\t\tprevEstimatedCost = currentSortAvoidanceCost.getEstimatedCost() -",
"\t\t\t\t\t\t\t\t\t\t\t\t\tap.getCostEstimate().getEstimatedCost();",
"\t\t\t\t\t\t// See discussion above for \"newCost\"; same applies here.",
"\t\t\t\t\t\tif (prevEstimatedCost <= 0.0)",
"\t\t\t\t\t\t\tif (joinPosition == 0)",
"\t\t\t\t\t\t\t\tprevEstimatedCost = 0.0;",
"\t\t\t\t\t\t\telse",
"\t\t\t\t\t\t\t\tprevEstimatedCost =",
"\t\t\t\t\t\t\t\t\trecoverCostFromProposedJoinOrder(true);",
"\t\t\t\t\t\t}",
"\t\t\t\t\t\tcurrentSortAvoidanceCost.setCost(",
"\t\t\t\t\t\t\tprevEstimatedCost,",
"\t\t\t\t\t\t\tprevRowCount,",
"\t\t\t\t\t\t\tprevSingleScanRowCount);",
"",
"\t\t\t\t\t\t/*",
"\t\t\t\t\t\t** Remove the table from the best row ordering.",
"\t\t\t\t\t\t** It should not be necessary to remove it from",
"\t\t\t\t\t\t** the current row ordering, because it is",
"\t\t\t\t\t\t** maintained as we step through the access paths",
"\t\t\t\t\t\t** for the current Optimizable.",
"\t\t\t\t\t\t*/",
"\t\t\t\t\t\tbestRowOrdering.removeOptimizable(",
"\t\t\t\t\t\t\t\t\t\t\t\t\tpullMe.getTableNumber());",
"\t\t\t\t\t\t/*",
"\t\t\t\t\t\t** When removing a table from the join order,",
"\t\t\t\t\t\t** the best row ordering for the remaining outer tables",
"\t\t\t\t\t\t** becomes the starting point for the row ordering of",
"\t\t\t\t\t\t** the current table.",
"\t\t\t\t\t\t*/",
"\t\t\t\t\t\tbestRowOrdering.copy(currentRowOrdering);",
"\t\t\t\t}",
"",
"\t\t\t\t/*",
"\t\t\t\t** Pull the predicates at from the optimizable and put",
"\t\t\t\t** them back in the predicate list.",
"\t\t\t\t**",
"\t\t\t\t** NOTE: This is a little inefficient because it pulls the",
"\t\t\t\t** single-table predicates, which are guaranteed to always",
"\t\t\t\t** be pushed to the same optimizable. We could make this",
"\t\t\t\t** leave the single-table predicates where they are.",
"\t\t\t\t*/",
"\t\t\t\tpullMe.pullOptPredicates(predicateList);",
"",
"\t\t\t\t/*",
"\t\t\t\t** When we pull an Optimizable we need to go through and",
"\t\t\t\t** load whatever best path we found for that Optimizable",
"\t\t\t\t** with respect to this OptimizerImpl. The reason is that",
"\t\t\t\t** we could be pulling the Optimizable for the last time",
"\t\t\t\t** (before returning false), in which case we want it (the",
"\t\t\t\t** Optimizable) to be holding the best access path that it",
"\t\t\t\t** had at the time we found bestJoinOrder. This ensures",
"\t\t\t\t** that the access path which is generated and executed for",
"\t\t\t\t** the Optimizable matches the the access path decisions",
"\t\t\t\t** made by this OptimizerImpl for the best join order.",
"\t\t\t\t**",
"\t\t\t\t** NOTE: We we only reload the best plan if it's necessary",
"\t\t\t\t** to do so--i.e. if the best plans aren't already loaded.",
"\t\t\t\t** The plans will already be loaded if the last complete",
"\t\t\t\t** join order we had was the best one so far, because that",
"\t\t\t\t** means we called \"rememberAsBest\" on every Optimizable",
"\t\t\t\t** in the list and, as part of that call, we will run through",
"\t\t\t\t** and set trulyTheBestAccessPath for the entire subtree.",
"\t\t\t\t** So if we haven't tried any other plans since then,",
"\t\t\t\t** we know that every Optimizable (and its subtree) already",
"\t\t\t\t** has the correct best plan loaded in its trulyTheBest",
"\t\t\t\t** path field. It's good to skip the load in this case",
"\t\t\t\t** because 'reloading best plans' involves walking the",
"\t\t\t\t** entire subtree of _every_ Optimizable in the list, which",
"\t\t\t\t** can be expensive if there are deeply nested subqueries.",
"\t\t\t\t*/",
"\t\t\t\tif (reloadBestPlan)",
"\t\t\t\t\tpullMe.updateBestPlanMap(FromTable.LOAD_PLAN, this);",
"\t\t\t\t/* Mark current join position as unused */",
"\t\t\t\tproposedJoinOrder[joinPosition] = -1;"
]
},
{
"added": [],
"header": "@@ -1118,23 +907,6 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\t\t/* Clear the assigned table map for the previous position ",
"\t\t\t\t * NOTE: We need to do this here to for the dependency tracking",
"\t\t\t\t */",
"\t\t\t\tif (joinPosition >= 0)",
"\t\t\t\t{",
"\t\t\t\t\tOptimizable pullMe =",
"\t\t\t\t\t\toptimizableList.getOptimizable(",
"\t\t\t\t\t\t\t\t\t\t\tproposedJoinOrder[joinPosition]);",
"",
"\t\t\t\t\t/*",
"\t\t\t\t\t** Clear the bits from the table at this join position.",
"\t\t\t\t\t** This depends on them having been set previously.",
"\t\t\t\t\t** NOTE: We need to do this here to for the dependency tracking",
"\t\t\t\t\t*/",
"\t\t\t\t\tassignedTableMap.xor(pullMe.getReferencedTableMap());",
"\t\t\t\t}",
""
]
},
{
"added": [],
"header": "@@ -1192,15 +964,6 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\t/* Set the assigned table map to be exactly the tables",
"\t\t\t * in the current join order. ",
"\t\t\t */",
"\t\t\tassignedTableMap.clearAll();",
"\t\t\tfor (int index = 0; index <= joinPosition; index++)",
"\t\t\t{",
"\t\t\t\tassignedTableMap.or(optimizableList.getOptimizable(proposedJoinOrder[index]).getReferencedTableMap());",
"\t\t\t}",
""
]
},
{
"added": [
"\t\t\t/* Update the assigned table map to include the newly-placed",
"\t\t\t * Optimizable in the current join order. Assumption is that",
"\t\t\t * this OR can always be undone using an XOR, which will only",
"\t\t\t * be true if none of the Optimizables have overlapping table",
"\t\t\t * maps. The XOR itself occurs as part of optimizable \"PULL\"",
"\t\t\t * processing.",
"\t\t\t */",
"\t\t\tif (SanityManager.DEBUG)",
"\t\t\t{",
"\t\t\t\tJBitSet optMap =",
"\t\t\t\t\t(JBitSet)nextOpt.getReferencedTableMap().clone();",
"",
"\t\t\t\toptMap.and(assignedTableMap);",
"\t\t\t\tif (optMap.getFirstSetBit() != -1)",
"\t\t\t\t{",
"\t\t\t\t\tSanityManager.THROWASSERT(",
"\t\t\t\t\t\t\"Found multiple optimizables that share one or \" +",
"\t\t\t\t\t\t\"more referenced table numbers (esp: '\" +",
"\t\t\t\t\t\toptMap + \"'), but that should not be the case.\");",
"\t\t\t\t}",
"\t\t\t}",
"",
"\t\t\tassignedTableMap.or(nextOpt.getReferencedTableMap());"
],
"header": "@@ -1209,6 +972,29 @@ public class OptimizerImpl implements Optimizer",
"removed": []
}
]
}
] |
derby-DERBY-3296-161e9cca
|
DERBY-3296: Importing to table in default schema fails when another table with the same name exists in a different schema
If schema isn't specified, ColumnInfo should pass the name of the
session's current schema to DatabaseMetaData.getColumns().
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@779681 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/load/ColumnInfo.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.jdbc.EngineConnection;"
],
"header": "@@ -33,6 +33,7 @@ import java.sql.Connection;",
"removed": []
}
]
}
] |
derby-DERBY-3299-c437565f
|
DERBY-3299 (incremental): Updates ConglomerateDescriptor.java to include
logic for detecting a conglomerate "replacement" scenario, and adds logic
to drop the old (shared) conglomerate in those scenarios. Also changes
the various ConstantAction classes which rely on the "drop(...)" methods
of ConglomerateDescriptor and ConstraintDescriptor so that they use the
utility methods added as part of svn # 628181. This commit is the one
which "activates" the previous changes for this issue and ultimately
changes Derby's behavior to fix the problem. Test cases will be added
in a subsequent commit.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629160 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/dictionary/ConglomerateDescriptor.java",
"hunks": [
{
"added": [
"\t ConglomerateDescriptor [] congDescs =",
"\t dd.getConglomerateDescriptors(getConglomerateNumber());",
"",
"\t\tboolean dropConglom = false;",
"\t\tConglomerateDescriptor physicalCD = null;",
"\t\tif (congDescs.length == 1)",
"\t\t\tdropConglom = true;",
"\t\telse",
"\t\t{",
"\t\t \t/* There are multiple conglomerate descriptors which share",
"\t\t\t * the same physical conglomerate. That said, if we are",
"\t\t\t * dropping the *ONLY* conglomerate descriptor that fully",
"\t\t\t * matches the physical conglomerate, then we have to do",
"\t\t\t * a little extra work. Namely, if the physical conglomerate",
"\t\t\t * is unique and this descriptor is unique, but none of the",
"\t\t\t * other descriptors which share with this one are unique,",
"\t\t\t * then we have to \"update\" the physical conglomerate to",
"\t\t\t * be non-unique. This ensures correct behavior for the",
"\t\t\t * remaining descriptors. (DERBY-3299)",
"\t\t\t *",
"\t\t\t * Note that \"update the physical conglomerate\" above is",
"\t\t\t * currently implemented as \"drop the old conglomerate\"",
"\t\t\t * (now) and \"create a new (replacement) one\" (later--let",
"\t\t\t * the caller do it). Possible improvements to that logic",
"\t\t\t * may be desirable in the future...",
"\t\t\t */",
"",
"\t\t\tboolean needNewConglomerate;",
"",
"\t\t\t/* Find a conglomerate descriptor that fully describes what",
"\t\t\t * a physical conglomerate would have to look like in order",
"\t\t\t * to fulfill the requirements (esp. uniqueness) of _all_",
"\t\t\t * conglomerate descriptors which share a physical conglomerate",
"\t\t\t * with this one. \"true\" in the next line means that when we",
"\t\t\t * search for such a conglomerate, we should ignore \"this\"",
"\t\t\t * descriptor--because we're going to drop this one and we",
"\t\t\t * want to see what the physical conglomerate must look like",
"\t\t\t * when \"this\" descriptor does not exist. Note that this",
"\t\t\t * call should never return null because we only get here",
"\t\t\t * if more than one descriptor shares a conglom with this",
"\t\t\t * one--so at the very least we'll have two descriptors,",
"\t\t\t * which means the following call should return the \"other\"",
"\t\t\t * one.",
"\t\t\t */",
"",
"\t\t\tphysicalCD = describeSharedConglomerate(congDescs, true);",
"\t\t\tIndexRowGenerator othersIRG = physicalCD.getIndexDescriptor();",
"",
"\t\t\t/* Let OTHERS denote the set of \"other\" descriptors which",
"\t\t\t * share a physical conglomerate with this one. Recall",
"\t\t\t * that (for now) 1) sharing descriptors must always have",
"\t\t\t * the same columns referenced in the same order, and",
"\t\t\t * 2) if a unique descriptor shares a conglomerate with",
"\t\t\t * a non-unique descriptor, the physical conglomerate",
"\t\t\t * must itself be unique. So given that, we have four",
"\t\t\t * possible cases:",
"\t\t\t *",
"\t\t\t * 1. \"this\" is unique, none of OTHERS are unique",
"\t\t\t * 2. \"this\" is unique, 1 or more of OTHERS is unique",
"\t\t\t * 3. \"this\" is not unique, none of OTHERS are unique",
"\t\t\t * 4. \"this\" is not unique, 1 or more of OTHERS is unique",
"\t\t\t *",
"\t\t\t * In case 1 \"this\" conglomerate descriptor must be the",
"\t\t\t * _only_ one which fully matches the physical conglom.",
"\t\t\t * In case 4, \"this\" descriptor does _not_ fully match",
"\t\t\t * the physical conglomerate. In cases 2 and 3, \"this\"",
"\t\t\t * descriptor fully matches the physical conglom, but it",
"\t\t\t * is NOT the only one to do so--which means we don't need",
"\t\t\t * to update the physical conglomerate when we drop \"this\"",
"\t\t\t * (because OTHERS need the exact same physical conglom).",
"\t\t\t * The only case that actually requires an \"updated\"",
"\t\t\t * conglomerate, then, is case 1, since the physical",
"\t\t\t * conglomerate for the remaining descriptors no longer",
"\t\t\t * has a uniqueness requirement.",
"\t\t\t */",
"\t\t\tneedNewConglomerate =",
"\t\t\t\tindexRowGenerator.isUnique() && !othersIRG.isUnique();",
"",
"\t\t\tif (needNewConglomerate)",
"\t\t\t{",
"\t\t\t\t/* We have to create a new backing conglomerate",
"\t\t\t\t * to correctly represent the remaing (sharing)",
"\t\t\t\t * descriptors, so drop the physical conglomerate",
"\t\t\t\t * now. The caller of the method can then create",
"\t\t\t\t * new conglomerate as/if needed.",
"\t\t\t\t */",
"\t\t\t\tdropConglom = true;",
"\t\t\t}",
"\t\t\telse",
"\t\t\t\tphysicalCD = null;",
"\t\t}",
"",
"\t if (dropConglom)",
"\t /* Drop the physical conglomerate */",
"\t }",
""
],
"header": "@@ -378,15 +378,107 @@ public final class ConglomerateDescriptor extends TupleDescriptor",
"removed": [
"\t if (dd.getConglomerateDescriptors(getConglomerateNumber()).length == 1)",
"\t /* Drop the conglomerate */",
" }\t ",
"\t "
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java",
"hunks": [
{
"added": [
"import java.util.ArrayList;"
],
"header": "@@ -21,6 +21,7 @@",
"removed": []
},
{
"added": [
"\t\tArrayList newCongloms = new ArrayList();",
""
],
"header": "@@ -813,6 +814,8 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": []
},
{
"added": [
"\t\t\tdropConstraint(cd, td, newCongloms, activation, lcc, true);"
],
"header": "@@ -872,8 +875,8 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": [
" cd.drop(lcc, true);"
]
},
{
"added": [
"\t\t\tdropConstraint(cd, td, newCongloms, activation, lcc, false);"
],
"header": "@@ -882,7 +885,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": [
"\t\t\tcd.drop(lcc, false);"
]
},
{
"added": [
"\t\t\t\t\tdropConstraint(fkcd, td,",
"\t\t\t\t\t\tnewCongloms, activation, lcc, true);"
],
"header": "@@ -900,7 +903,8 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": [
" fkcd.drop(lcc, true);"
]
},
{
"added": [
"\t\t/* If there are new backing conglomerates which must be",
"\t\t * created to replace a dropped shared conglomerate",
"\t\t * (where the shared conglomerate was dropped as part",
"\t\t * of a \"drop constraint\" call above), then create them",
"\t\t * now. We do this *after* dropping all dependent",
"\t\t * constraints because we don't want to waste time",
"\t\t * creating a new conglomerate if it's just going to be",
"\t\t * dropped again as part of another \"drop constraint\".",
"\t\t */",
"\t\tcreateNewBackingCongloms(",
"\t\t\tnewCongloms, (long[])null, activation, dd);",
""
],
"header": "@@ -914,6 +918,18 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": []
},
{
"added": [
"\t\tArrayList newCongloms = new ArrayList();"
],
"header": "@@ -1733,6 +1749,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": []
},
{
"added": [
"",
"\t\t\t\t\tdropConglomerate(cd, td, true, newCongloms, activation,",
"\t\t\t\t\t\tactivation.getLanguageConnectionContext());"
],
"header": "@@ -1753,7 +1770,9 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": [
"\t\t\t\t\tcd.drop(activation.getLanguageConnectionContext(), td);"
]
},
{
"added": [
"",
"\t\t\t/* If there are new backing conglomerates which must be",
"\t\t\t * created to replace a dropped shared conglomerate",
"\t\t\t * (where the shared conglomerate was dropped as part",
"\t\t\t * of a \"drop conglomerate\" call above), then create",
"\t\t\t * them now. We do this *after* dropping all dependent",
"\t\t\t * conglomerates because we don't want to waste time",
"\t\t\t * creating a new conglomerate if it's just going to be",
"\t\t\t * dropped again as part of another \"drop conglomerate\"",
"\t\t\t * call.",
"\t\t\t */",
"\t\t\tcreateNewBackingCongloms(newCongloms,",
"\t\t\t\tindexConglomerateNumbers, activation, dd);",
""
],
"header": "@@ -1771,6 +1790,20 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/DropConstraintConstantAction.java",
"hunks": [
{
"added": [
"\t\tdropConstraint(conDesc, activation, lcc, !cascadeOnRefKey);"
],
"header": "@@ -227,7 +227,7 @@ public class DropConstraintConstantAction extends ConstraintConstantAction",
"removed": [
" conDesc.drop(lcc, !cascadeOnRefKey);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/DropTableConstantAction.java",
"hunks": [
{
"added": [
"\t\t\tdropConstraint(cd, td, activation, lcc, true);"
],
"header": "@@ -312,7 +312,7 @@ class DropTableConstantAction extends DDLSingleTableConstantAction",
"removed": [
"\t\t\tcd.drop(lcc, true);"
]
},
{
"added": [
"\t\t\tdropConstraint(cd, td, activation, lcc, false);"
],
"header": "@@ -343,7 +343,7 @@ class DropTableConstantAction extends DDLSingleTableConstantAction",
"removed": [
"\t\t\tcd.drop(lcc, false);"
]
}
]
}
] |
derby-DERBY-3299-c92685ee
|
DERBY-3299 (incremental): Add several utility methods to
DDLSingleTableConstantAction.java that can be called by the various
ConstantAction subclasses to drop a constraint and/or a conglomerate
descriptor. In addition to performing the drop as before, these
utility methods also check to see if dropping the constraint or
index necessitates the "update" of a shared physical conglomerate.
If so, the new methods will take the necessary steps to create
create a new ("replacement") physical conglomerate that correctly
satisfies all remaining conglomerate descriptors--i.e. all of the
ones that were sharing the dropped physical conglomerate.
Note that a) Derby does not currently have logic to recognize when
a shared physical conglomerate needs to be updated (that's coming
in a subsequent patch), and b) there are currently no calls to the
new utility methods. Thus this commit should not have any effect
on Derby yet (incremental development).
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628181 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/dictionary/ConglomerateDescriptor.java",
"hunks": [
{
"added": [
" * @param lcc Connection context to use for dropping",
" * @param td TableDescriptor for the table to which this",
" * conglomerate belongs",
" * @return If the conglomerate described by this descriptor",
" * is an index conglomerate that is shared by multiple",
" * constraints/indexes, then we may have to create a new",
" * conglomerate to satisfy the constraints/indexes which",
" * remain after we drop the existing conglomerate. If that's",
" * needed then we'll return a conglomerate descriptor which",
" * describes what the new conglomerate must look like. It",
" * is then up to the caller of this method to create a new",
" * corresponding conglomerate. We don't create the index",
" * here because depending on who called us, it might not",
" * make sense to create it--esp. if we get here because of",
" * a DROP TABLE.",
"\tpublic ConglomerateDescriptor drop(LanguageConnectionContext lcc,",
"\t\tTableDescriptor td) throws StandardException",
"\t{"
],
"header": "@@ -348,14 +348,26 @@ public final class ConglomerateDescriptor extends TupleDescriptor",
"removed": [
" * @param lcc",
" * @param td",
"\tpublic void drop(LanguageConnectionContext lcc,",
"\t TableDescriptor td)",
"\tthrows StandardException",
"\t{ "
]
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/sql/dictionary/ConstraintDescriptor.java",
"hunks": [
{
"added": [
" *",
" * @return If the backing conglomerate for this constraint",
" * was a) dropped and b) shared by other constraints/indexes,",
" * then this method will return a ConglomerateDescriptor that",
" * describes what a new backing conglomerate must look like",
" * to stay \"sharable\" across the remaining constraints/indexes.",
" * It is then up to the caller to create a corresponding ",
" * conglomerate. We don't create the conglomerate here",
" * because depending on who called us, it might not make",
" * sense to create it--ex. if we get here because of a DROP",
" * TABLE, the DropTable action doesn't need to create a",
" * new backing conglomerate since the table (and all of",
" * its constraints/indexes) are going to disappear anyway.",
" public ConglomerateDescriptor drop(LanguageConnectionContext lcc,",
" boolean clearDependencies) throws StandardException"
],
"header": "@@ -641,10 +641,22 @@ public abstract class ConstraintDescriptor",
"removed": [
" public void drop(LanguageConnectionContext lcc,",
" boolean clearDependencies)",
" throws StandardException"
]
},
{
"added": [
" ConglomerateDescriptor newBackingConglomCD = null;"
],
"header": "@@ -667,6 +679,7 @@ public abstract class ConstraintDescriptor",
"removed": []
},
{
"added": [
" newBackingConglomCD = conglomDescs[i].drop(lcc, table);"
],
"header": "@@ -689,7 +702,7 @@ public abstract class ConstraintDescriptor",
"removed": [
" conglomDescs[i].drop(lcc, table);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/DDLSingleTableConstantAction.java",
"hunks": [
{
"added": [
"import java.util.List;",
"import java.util.Properties;",
"",
"import org.apache.derby.iapi.error.StandardException;",
"",
"import org.apache.derby.iapi.services.sanity.SanityManager;",
"",
"import org.apache.derby.iapi.sql.Activation;",
"import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;",
"import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;",
"import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;",
"import org.apache.derby.iapi.sql.dictionary.KeyConstraintDescriptor;",
"import org.apache.derby.iapi.sql.dictionary.DataDictionary;",
"import org.apache.derby.iapi.sql.dictionary.TableDescriptor;",
"import org.apache.derby.iapi.sql.execute.ConstantAction;",
"import org.apache.derby.iapi.store.access.ConglomerateController;",
"import org.apache.derby.iapi.store.access.TransactionController;",
""
],
"header": "@@ -21,8 +21,26 @@",
"removed": []
},
{
"added": [
"",
"\t/**",
"\t * Drop the constraint corresponding to the received descriptor.",
"\t * If in doing so we also drop a backing conglomerate that is",
"\t * shared by other constraints/indexes, then we have to create",
"\t * a new conglomerate to fill the gap.",
"\t *",
"\t * This method exists here as a \"utility\" method for the various",
"\t * constant actions that may drop constraints in one way or",
"\t * another (there are several that do).",
"\t *",
"\t * @param consDesc ConstraintDescriptor for the constraint to drop",
"\t * @param activation Activation used when creating a new backing",
"\t * index (if a new backing index is needed)",
"\t * @param lcc LanguageConnectionContext used for dropping",
"\t * @param clearDeps Whether or not to clear dependencies when",
"\t * dropping the constraint",
"\t */",
"\tvoid dropConstraint(ConstraintDescriptor consDesc,",
"\t\tActivation activation, LanguageConnectionContext lcc,",
"\t\tboolean clearDeps) throws StandardException",
"\t{",
"\t\tdropConstraint(consDesc, (TableDescriptor)null,",
"\t\t\t(List)null, activation, lcc, clearDeps);",
"\t}",
"",
"\t/**",
"\t * See \"dropConstraint(...\") above.",
"\t *",
"\t * @param skipCreate Optional TableDescriptor. If non-null",
"\t * then we will skip the \"create new conglomerate\" processing",
"\t * *IF* the constraint that we drop came from the table",
"\t * described by skipCreate.",
"\t */",
"\tvoid dropConstraint(ConstraintDescriptor consDesc,",
"\t\tTableDescriptor skipCreate, Activation activation,",
"\t\tLanguageConnectionContext lcc, boolean clearDeps)",
"\t\tthrows StandardException",
"\t{",
"\t\tdropConstraint(consDesc, skipCreate,",
"\t\t\t(List)null, activation, lcc, clearDeps);",
"\t}",
"",
"\t/**",
"\t * See \"dropConstraint(...\") above.",
"\t *",
"\t * @param newConglomActions Optional List. If non-null then",
"\t * for each ConglomerateDescriptor for which we skip the",
"\t * \"create new conglomerate\" processing we will add a",
"\t * ConstantAction to this list. The constant action can",
"\t * then be executed later (esp. by the caller) to create the",
"\t * new conglomerate, if needed. If this argument is null and",
"\t * we skip creation of a new conglomerate, the new conglomerate",
"\t * is effectively ignored (which may be fine in some cases--",
"\t * ex. when dropping a table).",
"\t */",
"\tvoid dropConstraint(ConstraintDescriptor consDesc,",
"\t\tTableDescriptor skipCreate, List newConglomActions,",
"\t\tActivation activation, LanguageConnectionContext lcc,",
"\t\tboolean clearDeps) throws StandardException",
"\t{",
"\t\t/* Get the properties on the old backing conglomerate before",
"\t\t * dropping the constraint, since we can't get them later if",
"\t\t * dropping the constraint causes us to drop the backing",
"\t\t * conglomerate.",
"\t\t */",
"\t\tProperties ixProps = null;",
"\t\tif (consDesc instanceof KeyConstraintDescriptor)",
"\t\t{",
"\t\t\tixProps = new Properties();",
"\t\t\tloadIndexProperties(lcc,",
"\t\t\t\t((KeyConstraintDescriptor)consDesc)",
"\t\t\t\t\t.getIndexConglomerateDescriptor(lcc.getDataDictionary()),",
"\t\t\t\tixProps);",
"\t\t}",
"",
"\t\tConglomerateDescriptor newBackingConglomCD = consDesc.drop(lcc, clearDeps);",
"",
"\t\t/* If we don't need a new conglomerate then there's nothing",
"\t\t * else to do.",
"\t\t */",
"\t\tif (newBackingConglomCD == null)",
"\t\t\treturn;",
"",
"\t\t/* Only create the new conglomerate if it is NOT for the table",
"\t\t * described by skipCreate.",
"\t\t */",
"\t\tif ((skipCreate != null) &&",
"\t\t\tskipCreate.getUUID().equals(",
"\t\t\t\tconsDesc.getTableDescriptor().getUUID()))",
"\t\t{",
"\t\t\t/* We're skipping the \"create new conglom\" phase; if we have",
"\t\t\t * a list in which to store the ConstantAction, then store it;",
"\t\t\t * otherwise, the new conglomerate is effectively ignored.",
"\t\t\t */",
"\t\t\tif (newConglomActions != null)",
"\t\t\t{",
"\t\t\t\tnewConglomActions.add(",
"\t\t\t\t\tgetConglomReplacementAction(newBackingConglomCD,",
"\t\t\t\t\t\tconsDesc.getTableDescriptor(), ixProps));",
"\t\t\t}",
"\t\t}",
"\t\telse",
"\t\t{",
"\t\t\texecuteConglomReplacement(",
"\t\t\t\tgetConglomReplacementAction(newBackingConglomCD,",
"\t\t\t\t\tconsDesc.getTableDescriptor(), ixProps),",
"\t\t\t\tactivation);",
"\t\t}",
"",
"\t\treturn;",
"\t}",
"",
"\t/**",
"\t * Similar to dropConstraint(...) above, except this method",
"\t * drops a conglomerate directly instead of going through",
"\t * a ConstraintDescriptor.",
"\t *",
"\t * @param congDesc ConglomerateDescriptor for the conglom to drop",
"\t * @param td TableDescriptor for the table on which congDesc exists",
"\t * @param activation Activation used when creating a new backing",
"\t * index (if a new backing index is needed)",
"\t * @param lcc LanguageConnectionContext used for dropping",
"\t */",
"\tvoid dropConglomerate(",
"\t\tConglomerateDescriptor congDesc, TableDescriptor td,",
"\t\tActivation activation, LanguageConnectionContext lcc)",
"\t\tthrows StandardException",
"\t{",
"\t\tdropConglomerate(congDesc, td,",
"\t\t\tfalse, (List)null, activation, lcc);",
"\t}",
"",
"\t/**",
"\t * See \"dropConglomerate(...)\" above.",
"\t *\t",
"\t * @param skipCreate If true then we will skip the \"create",
"\t * new conglomerate\" processing for the dropped conglom.",
"\t * @param newConglomActions Optional List. If non-null then",
"\t * for each ConglomerateDescriptor for which we skip the",
"\t * \"create new conglomerate\" processing we will add a",
"\t * ConstantAction to this list. The constant action can",
"\t * then be executed later (esp. by the caller) to create the",
"\t * new conglomerate, if needed. If this argument is null and",
"\t * we skip creation of a new conglomerate, the new conglomerate",
"\t * is effectively ignored (which may be fine in some cases--",
"\t * ex. when dropping a table).",
"\t */",
"\tvoid dropConglomerate(",
"\t\tConglomerateDescriptor congDesc, TableDescriptor td,",
"\t\tboolean skipCreate, List newConglomActions,",
"\t\tActivation activation, LanguageConnectionContext lcc)",
"\t\tthrows StandardException",
"\t{",
"\t\t// Get the properties on the old index before dropping.",
"\t\tProperties ixProps = new Properties();",
"\t\tloadIndexProperties(lcc, congDesc, ixProps);",
"",
"\t\t// Drop the conglomerate.",
"\t\tConglomerateDescriptor newBackingConglomCD = congDesc.drop(lcc, td);",
"",
"\t\t/* If we don't need a new conglomerate then there's nothing",
"\t\t * else to do.",
"\t\t */",
"\t\tif (newBackingConglomCD == null)",
"\t\t\treturn;",
"",
"\t\tif (skipCreate)",
"\t\t{",
"\t\t\t/* We're skipping the \"create new conglom\" phase; if we have",
"\t\t\t * a list in which to store the ConstantAction, then store it;",
"\t\t\t * otherwise, the new conglomerate is effectively ignored.",
"\t\t\t */",
"\t\t\tif (newConglomActions != null)",
"\t\t\t{",
"\t\t\t\tnewConglomActions.add(",
"\t\t\t\t\tgetConglomReplacementAction(",
"\t\t\t\t\t\tnewBackingConglomCD, td, ixProps));",
"\t\t\t}",
"\t\t}",
"\t\telse",
"\t\t{",
"\t\t\texecuteConglomReplacement(",
"\t\t\t\tgetConglomReplacementAction(newBackingConglomCD, td, ixProps),",
"\t\t\t\tactivation);",
"\t\t}",
"",
"\t\treturn;",
"\t}",
"",
"\t/**",
"\t * Get any table properties that exist for the received",
"\t * index descriptor.",
"\t */",
"\tprivate void loadIndexProperties(LanguageConnectionContext lcc,",
"\t\tConglomerateDescriptor congDesc, Properties ixProps)",
"\t\tthrows StandardException",
"\t{",
"\t \tConglomerateController cc = ",
"\t\t \tlcc.getTransactionExecute().openConglomerate(",
"\t\t\t \tcongDesc.getConglomerateNumber(),",
"\t\t\t \tfalse,",
"\t\t\t \tTransactionController.OPENMODE_FORUPDATE,",
"\t\t\t \tTransactionController.MODE_TABLE,",
"\t\t\t \tTransactionController.ISOLATION_SERIALIZABLE);",
"",
"\t\tcc.getInternalTablePropertySet(ixProps);",
"\t\tcc.close();",
"\t\treturn;",
"\t}",
"",
"\t/**",
"\t * Create a ConstantAction which, when executed, will create a",
"\t * new conglomerate whose attributes match those of the received",
"\t * ConglomerateDescriptor.",
"\t *",
"\t * @param srcCD Descriptor describing what the replacement",
"\t * physical conglomerate should look like",
"\t * @param td Table descriptor for the table to which srcCD belongs",
"\t * @param properties Properties from the old (dropped) conglom",
"\t * that should be \"forwarded\" to the new (replacement) conglom.",
"\t */",
"\tConstantAction getConglomReplacementAction(ConglomerateDescriptor srcCD,",
"\t\tTableDescriptor td, Properties properties) throws StandardException",
"\t{",
"\t\t/* Re-use CreateIndexActionConstantAction to do the work",
"\t\t * of creating a new conglomerate. The big difference",
"\t\t * between creating an _index_ and creating an index",
"\t\t * _conglomerate_ is that we don't need to create a new",
"\t\t * ConglomerateDescriptor in the latter case. Use of the",
"\t\t * following constructor dictates that we want to create",
"\t\t * a _conglomerate_ only--i.e. that no new conglomerate",
"\t\t * descriptor is necessary.",
"\t\t */",
"\t\treturn new CreateIndexConstantAction(srcCD, td, properties);",
"\t}",
"",
"\t/**",
"\t * Execute the received ConstantAction, which will create a",
"\t * new physical conglomerate (or find an existing physical",
"\t * conglomerate that is \"sharable\") to replace some dropped",
"\t * physical conglomerate. Then find any conglomerate descriptors",
"\t * which still reference the dropped physical conglomerate and",
"\t * update them all to have a conglomerate number that points",
"\t * to the conglomerate created by the ConstantAction.",
"\t *",
"\t * This method is called as part of DROP processing to handle",
"\t * cases where a physical conglomerate that was shared by",
"\t * multiple descriptors is dropped--in which case a new physical",
"\t * conglomerate must be created to support the remaining",
"\t * descriptors.",
"\t *",
"\t * @param replaceConglom Constant action which, when executed,",
"\t * will either create a new conglomerate or find an existing",
"\t * one that satisfies the ConstantAction's requirements.",
"\t * @param activation Activation used when creating the conglom",
"\t */",
"\tvoid executeConglomReplacement(ConstantAction replaceConglom,",
"\t\tActivation activation) throws StandardException",
"\t{",
"\t\tCreateIndexConstantAction replaceConglomAction =",
"\t\t\t(CreateIndexConstantAction)replaceConglom;",
"",
"\t\tLanguageConnectionContext lcc =",
"\t\t\tactivation.getLanguageConnectionContext();",
"",
"\t\tDataDictionary dd = lcc.getDataDictionary();",
"",
"\t\t// Create the new (replacment) backing conglomerate...",
"\t\treplaceConglomAction.executeConstantAction(activation);",
"",
"\t\t/* Find all conglomerate descriptors that referenced the",
"\t\t * old backing conglomerate and update them to have the",
"\t\t * conglomerate number for the new backing conglomerate.",
"\t\t */",
"\t\tConglomerateDescriptor [] congDescs =",
"\t\t\tdd.getConglomerateDescriptors(",
"\t\t\t\treplaceConglomAction.getReplacedConglomNumber());",
"",
"\t\tif (SanityManager.DEBUG)",
"\t\t{",
"\t\t\t/* There should be at least one descriptor requiring",
"\t\t\t * an updated conglomerate number--namely, the one",
"\t\t\t * corresponding to \"srcCD\" for which the constant",
"\t\t\t * action was created (see getConglomReplacementAction()",
"\t\t\t * above). There may be others, as well.",
"\t\t\t */",
"\t\t\tif (congDescs.length < 1)",
"\t\t\t{",
"\t\t\t\tSanityManager.THROWASSERT(",
"\t\t\t\t\t\"Should have found at least one conglomerate \" +",
"\t\t\t\t\t\"descriptor that needs an updated conglomerate \" +",
"\t\t\t\t\t\"number (due to a dropped index), but only \" +",
"\t\t\t\t\t\"found \" + congDescs.length);",
"\t\t\t}",
"\t\t}",
"",
"\t\tdd.updateConglomerateDescriptor(congDescs,",
"\t\t\treplaceConglomAction.getCreatedConglomNumber(),",
"\t\t\tlcc.getTransactionExecute());",
"",
"\t\treturn;",
"\t}"
],
"header": "@@ -46,4 +64,307 @@ abstract class DDLSingleTableConstantAction extends DDLConstantAction",
"removed": []
}
]
}
] |
derby-DERBY-3299-cf870799
|
DERBY-3299 (incremental): Changes/additions to CreateIndexConstantAction.java
so that it can support the creation of "replacement" conglomerates, which
will be necessary when the physical conglomerate for an index has been
dropped but the index descriptor still exists. This can happen if multiple
indexes share a physical conglomerate but then the conglomerate is dropped
as part of "drop index" processing for one of the indexes. (Note that
"indexes" here includes indexes which were created to back constraints).
In that case we'll need to create a new "replacement" physical conglomerate
to support the existing descriptor.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@627836 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/CreateIndexConstantAction.java",
"hunks": [
{
"added": [
"\t/** Conglomerate number for the conglomerate created by this",
"\t * constant action; -1L if this constant action has not been",
"\t * executed. If this constant action doesn't actually create",
"\t * a new conglomerate--which can happen if it finds an existing",
"\t * conglomerate that satisfies all of the criteria--then this",
"\t * field will hold the conglomerate number of whatever existing",
"\t * conglomerate was found.",
"\t */",
"\tprivate long conglomId;",
"",
"\t/** Conglomerate number of the physical conglomerate that we",
"\t * will \"replace\" using this constant action. That is, if",
"\t * the purpose of this constant action is to create a new physical",
"\t * conglomerate to replace a dropped physical conglomerate, then",
"\t * this field holds the conglomerate number of the dropped physical",
"\t * conglomerate. If -1L then we are not replacing a conglomerate,",
"\t * we're simply creating a new index (and backing physical",
"\t * conglomerate) as normal.",
"\t */",
"\tprivate long droppedConglomNum;"
],
"header": "@@ -85,6 +85,26 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": []
},
{
"added": [
"\t\tthis.conglomId = -1L;",
"\t\tthis.droppedConglomNum = -1L;",
"\t}",
"",
"\t/**",
"\t * Make a ConstantAction that creates a new physical conglomerate",
"\t * based on index information stored in the received descriptors.",
"\t * Assumption is that the received ConglomerateDescriptor is still",
"\t * valid (meaning it has corresponding entries in the system tables",
"\t * and it describes some constraint/index that has _not_ been",
"\t * dropped--though the physical conglomerate underneath has).",
"\t *",
"\t * This constructor is used in cases where the physical conglomerate",
"\t * for an index has been dropped but the index still exists. That",
"\t * can happen if multiple indexes share a physical conglomerate but",
"\t * then the conglomerate is dropped as part of \"drop index\" processing",
"\t * for one of the indexes. (Note that \"indexes\" here includes indexes",
"\t * which were created to back constraints.) In that case we have to",
"\t * create a new conglomerate to satisfy the remaining sharing indexes,",
"\t * so that's what we're here for. See ConglomerateDescriptor.drop()",
"\t * for details on when that is necessary.",
"\t */",
"\tCreateIndexConstantAction(ConglomerateDescriptor srcCD,",
"\t\tTableDescriptor td, Properties properties)",
"\t{",
"\t\tsuper(td.getUUID(),",
"\t\t\tsrcCD.getConglomerateName(), td.getName(), td.getSchemaName());",
"",
"\t\tthis.forCreateTable = false;",
"",
"\t\t/* We get here when a conglomerate has been dropped and we",
"\t\t * need to create (or find) another one to fill its place.",
"\t\t * At this point the received conglomerate descriptor still",
"\t\t * references the old (dropped) conglomerate, so we can",
"\t\t * pull the conglomerate number from there.",
"\t\t */",
"\t\tthis.droppedConglomNum = srcCD.getConglomerateNumber();",
"",
"\t\t/* Plug in the rest of the information from the received",
"\t\t * descriptors.",
"\t\t */",
"\t\tIndexRowGenerator irg = srcCD.getIndexDescriptor();",
"\t\tthis.unique = irg.isUnique();",
"\t\tthis.indexType = irg.indexType();",
"\t\tthis.columnNames = srcCD.getColumnNames();",
"\t\tthis.isAscending = irg.isAscending();",
"\t\tthis.isConstraint = srcCD.isConstraint();",
"\t\tthis.conglomerateUUID = srcCD.getUUID();",
"\t\tthis.properties = properties;",
"\t\tthis.conglomId = -1L;",
"",
"\t\t/* The ConglomerateDescriptor may not know the names of",
"\t\t * the columns it includes. If that's true (which seems",
"\t\t * to be the more common case) then we have to build the",
"\t\t * list of ColumnNames ourselves.",
"\t\t */",
"\t\tif (columnNames == null)",
"\t\t{",
"\t\t\tint [] baseCols = irg.baseColumnPositions();",
"\t\t\tcolumnNames = new String[baseCols.length];",
"\t\t\tColumnDescriptorList colDL = td.getColumnDescriptorList();",
"\t\t\tfor (int i = 0; i < baseCols.length; i++)",
"\t\t\t{",
"\t\t\t\tcolumnNames[i] =",
"\t\t\t\t\tcolDL.elementAt(baseCols[i]-1).getColumnName();",
"\t\t\t}",
"\t\t}"
],
"header": "@@ -126,6 +146,73 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": []
},
{
"added": [
"\t\t/* The code below tries to determine if the index that we're about",
"\t\t * to create can \"share\" a conglomerate with an existing index.",
"\t\t * If so, we will use a single physical conglomerate--namely, the",
"\t\t * one that already exists--to support both indexes. I.e. we will",
"\t\t * *not* create a new conglomerate as part of this constant action.",
"\t\t */ ",
"",
"\t\tboolean shareExisting = false;",
"",
"\t\t\tif (droppedConglomNum == cd.getConglomerateNumber())",
"\t\t\t{",
"\t\t\t\t/* We can't share with any conglomerate descriptor",
"\t\t\t\t * whose conglomerate number matches the dropped",
"\t\t\t\t * conglomerate number, because that descriptor's",
"\t\t\t\t * backing conglomerate was dropped, as well. If",
"\t\t\t\t * we're going to share, we have to share with a",
"\t\t\t\t * descriptor whose backing physical conglomerate",
"\t\t\t\t * is still around.",
"\t\t\t\t */",
"\t\t\t\tcontinue;",
"\t\t\t}",
"",
"\t\t\t/* The conditions which allow an index to share an existing",
"\t\t\t * conglomerate are as follows:",
"\t\t\t *",
"\t\t\t */ ",
"\t\t\tboolean possibleShare = (irg.isUnique() || !unique) &&",
"\t\t\t (bcps.length == baseColumnPositions.length);",
"\t\t\tif (possibleShare && indexType.equals(irg.indexType()))"
],
"header": "@@ -298,33 +385,53 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\tboolean duplicate = false;",
" long conglomId = 0;",
"",
"\t\t\t/* For an index to be considered a duplicate of already existing index, the",
"\t\t\t * following conditions have to be satisfied:",
"\t\t\t */",
"\t\t\tif ((bcps.length == baseColumnPositions.length) &&",
"\t\t\t (irg.isUnique() || !unique) &&",
"\t\t\t\tindexType.equals(irg.indexType()))"
]
},
{
"added": [
"\t\t\tif (j == baseColumnPositions.length)\t// share"
],
"header": "@@ -333,7 +440,7 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\t\tif (j == baseColumnPositions.length)\t// duplicate"
]
},
{
"added": [
"\t\t\t\t/* Sharing indexes share the physical conglomerate",
"\t\t\t\t * underneath, so pull the conglomerate number from",
"\t\t\t\t * the existing conglomerate descriptor.",
"\t\t\t\t */",
"",
"\t\t\t\t/* We create a new IndexRowGenerator because certain",
"\t\t\t\t * attributes--esp. uniqueness--may be different between",
"\t\t\t\t * the index we're creating and the conglomerate that",
"\t\t\t\t * already exists. I.e. even though we're sharing a",
"\t\t\t\t * conglomerate, the new index is not necessarily",
"\t\t\t\t * identical to the existing conglomerate. We have to",
"\t\t\t\t * keep track of that info so that if we later drop",
"\t\t\t\t * the shared physical conglomerate, we can figure out",
"\t\t\t\t * what this index (the one we're creating now) is",
"\t\t\t\t * really supposed to look like.",
"\t\t\t\t */",
"\t\t\t\tindexRowGenerator =",
"\t\t\t\t\tnew IndexRowGenerator(",
"\t\t\t\t\t\tindexType, unique,",
"\t\t\t\t\t\tbaseColumnPositions,",
"\t\t\t\t\t\tisAscending,",
"\t\t\t\t\t\tbaseColumnPositions.length);",
"",
"\t\t\t\t// Sharing indexes will have unique logical conglomerate UUIDs.",
"\t\t\t\tshareExisting = true;",
"\t\t/* If we have a droppedConglomNum then the index we're about to",
"\t\t * \"create\" already exists--i.e. it has an index descriptor and",
"\t\t * the corresponding information is already in the system catalogs.",
"\t\t * The only thing we're missing, then, is the physical conglomerate",
"\t\t * to back the index (because the old conglomerate was dropped).",
"\t\t */",
"\t\tboolean alreadyHaveConglomDescriptor = (droppedConglomNum > -1L);",
"",
"\t\t * entry into SYSCONGLOMERATES--unless we already have a descriptor,",
"\t\t * in which case we don't even need to do that.",
"\t\tif (shareExisting && !alreadyHaveConglomDescriptor)"
],
"header": "@@ -349,23 +456,53 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\t\t\t//Duplicate indexes share the physical conglomerate underneath",
"\t\t\t\tindexRowGenerator = cd.getIndexDescriptor();",
"\t\t\t\t//Duplicate indexes will have unqiue logical conglomerate UUIDs. ",
"\t\t\t\tduplicate = true;",
"\t\t * entry into SYSCONGLOMERATES.",
"\t\tif (duplicate)"
]
},
{
"added": [
"\t\tif (! shareExisting)"
],
"header": "@@ -416,7 +553,7 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\tif (! duplicate)"
]
},
{
"added": [
"\t\t\t/* now that we got indexTemplateRow, done for sharing index",
"\t\t\tif (shareExisting)"
],
"header": "@@ -527,9 +664,9 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\t\t/* now that we got indexTemplateRow, done for duplicate index",
"\t\t\tif (duplicate)"
]
},
{
"added": [
"\t\t// Create a conglomerate descriptor with the conglomId filled",
"\t\t// in and add it--if we don't have one already.",
"\t\tif (!alreadyHaveConglomDescriptor)",
"\t\t{",
"\t\t\tConglomerateDescriptor cgd =",
"\t\t\t\tddg.newConglomerateDescriptor(",
"\t\t\t\t\tconglomId, indexName, true,",
"\t\t\t\t\tindexRowGenerator, isConstraint,",
"\t\t\t\t\tconglomerateUUID, td.getUUID(), sd.getUUID() );",
"\t\t\tdd.addDescriptor(cgd, sd,",
"\t\t\t\tDataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);",
"\t\t\t// add newly added conglomerate to the list of conglomerate",
"\t\t\t// descriptors in the td.",
"\t\t\tConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();",
"\t\t\tcdl.add(cgd);",
"\t\t\t/* Since we created a new conglomerate descriptor, load",
"\t\t\t * its UUID into the corresponding field, to ensure that",
"\t\t\t * it is properly set in the StatisticsDescriptor created",
"\t\t\t * below.",
"\t\t\t */",
"\t\t\tconglomerateUUID = cgd.getUUID();",
"\t\t}"
],
"header": "@@ -646,21 +783,32 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\t// Create a conglomerate descriptor with the conglomId filled in and",
"\t\t// add it.",
"\t\tConglomerateDescriptor cgd =",
"\t\t\tddg.newConglomerateDescriptor(conglomId, indexName, true,",
"\t\t\t\t\t\t\t\t\t\t indexRowGenerator, isConstraint,",
"\t\t\t\t\t\t\t\t\t\t conglomerateUUID, td.getUUID(), sd.getUUID() );",
"\t\tdd.addDescriptor(cgd, sd, DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);",
"\t\t// add newly added conglomerate to the list of conglomerate descriptors",
"\t\t// in the td.",
"\t\tConglomerateDescriptorList cdl = td.getConglomerateDescriptorList();",
"\t\tcdl.add(cgd);"
]
},
{
"added": [
"\t\t\t\t\tnew StatisticsDescriptor(dd,",
"\t\t\t\t\t\tdd.getUUIDFactory().createUUID(),",
"\t\t\t\t\t\tconglomerateUUID, td.getUUID(), \"I\",",
"\t\t\t\t\t\tnew StatisticsImpl(numRows, c[i]), i + 1);",
""
],
"header": "@@ -670,9 +818,11 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\t\t\t\tnew StatisticsDescriptor(dd, dd.getUUIDFactory().createUUID(),",
"\t\t\t\t\t\t\t\t\t\t\t\tcgd.getUUID(), td.getUUID(), \"I\", new StatisticsImpl(numRows, c[i]),",
"\t\t\t\t\t\t\t\t\t\t\t\ti + 1);"
]
},
{
"added": [
"\t/**",
"\t * Get the conglomerate number for the conglomerate that was",
"\t * created by this constant action. Will return -1L if the",
"\t * constant action has not yet been executed. This is used",
"\t * for updating conglomerate descriptors which share a",
"\t * conglomerate that has been dropped, in which case those",
"\t * \"sharing\" descriptors need to point to the newly-created",
"\t * conglomerate (the newly-created conglomerate replaces",
"\t * the dropped one).",
"\t */",
"\tlong getCreatedConglomNumber()",
"\t{",
"\t\tif (SanityManager.DEBUG)",
"\t\t{",
"\t\t\tif (conglomId == -1L)",
"\t\t\t{",
"\t\t\t\tSanityManager.THROWASSERT(",
"\t\t\t\t\t\"Called getCreatedConglomNumber() on a CreateIndex\" +",
"\t\t\t\t\t\"ConstantAction before the action was executed.\");",
"\t\t\t}",
"\t\t}",
"",
"\t\treturn conglomId;",
"\t}",
"",
"\t/**",
"\t * If the purpose of this constant action was to \"replace\" a",
"\t * dropped physical conglomerate, then this method returns the",
"\t * conglomerate number of the dropped conglomerate. Otherwise",
"\t * this method will end up returning -1.",
"\t */",
"\tlong getReplacedConglomNumber()",
"\t{",
"\t\treturn droppedConglomNum;",
"\t}",
"",
"\t/**",
"\t * Get the UUID for the conglomerate descriptor that was created",
"\t * (or re-used) by this constant action.",
"\t */",
"\tUUID getCreatedUUID()",
"\t{",
"\t\treturn conglomerateUUID;",
"\t}",
""
],
"header": "@@ -692,6 +842,51 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": []
}
]
}
] |
derby-DERBY-3301-de519067
|
DERBY-3301: Incorrect result from query with nested EXIST
Prevent the optimizer from flattening subqueries that
need to be evaluated to get correct results.
Patch contributed by Thomas Nielsen
Patch files: derby-3301-8.diff, derby-3301-test-master-2.diff,
derby-3301-test-3.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618586 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/SelectNode.java",
"hunks": [
{
"added": [
"\tboolean originalWhereClauseHadSubqueries;",
"\t"
],
"header": "@@ -112,6 +112,8 @@ public class SelectNode extends ResultSetNode",
"removed": []
},
{
"added": [
"\t\t",
"\t\tthis.originalWhereClauseHadSubqueries = false;",
"\t\tif (this.whereClause != null){",
"\t\t\tCollectNodesVisitor cnv = ",
"\t\t\t\tnew CollectNodesVisitor(SubqueryNode.class, SubqueryNode.class);",
"\t\t\tthis.whereClause.accept(cnv);",
"\t\t\tif (!cnv.getList().isEmpty()){",
"\t\t\t\tthis.originalWhereClauseHadSubqueries = true;",
"\t\t\t}",
"\t\t}"
],
"header": "@@ -138,6 +140,16 @@ public class SelectNode extends ResultSetNode",
"removed": []
},
{
"added": [
"\t\t\t"
],
"header": "@@ -456,7 +468,7 @@ public class SelectNode extends ResultSetNode",
"removed": [
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/SubqueryNode.java",
"hunks": [
{
"added": [
"import java.util.Iterator;"
],
"header": "@@ -44,6 +44,7 @@ import org.apache.derby.iapi.store.access.Qualifier;",
"removed": []
},
{
"added": [
"\t/* Whether or not this subquery began life as a subquery in a where clause */",
"\tboolean\t\t\twhereSubquery;",
"\t"
],
"header": "@@ -110,6 +111,9 @@ public class SubqueryNode extends ValueNode",
"removed": []
},
{
"added": [
"\t\t * o Either a) it does not appear within a WHERE clause, or ",
"\t\t * b) it appears within a WHERE clause but does not itself ",
"\t\t * contain a WHERE clause with other subqueries in it. ",
"\t\t * (DERBY-3301)",
"\t\t\t\t\t !isWhereExistsAnyInWithWhereSubquery() &&"
],
"header": "@@ -614,9 +618,14 @@ public class SubqueryNode extends ValueNode",
"removed": []
},
{
"added": [
"\t\t * o Either a) it does not appear within a WHERE clause, or ",
"\t\t * b) it appears within a WHERE clause but does not itself ",
"\t\t * contain a WHERE clause with other subqueries in it. ",
"\t\t * (DERBY-3301)",
"\t\t\t\t\t !isWhereExistsAnyInWithWhereSubquery() &&"
],
"header": "@@ -677,11 +686,16 @@ public class SubqueryNode extends ValueNode",
"removed": []
}
]
}
] |
derby-DERBY-3302-9460d4cc
|
DERBY-3302 The user was running into null pointer exception at the time of database recovery
because Derby was trying to get the Collator object through database context. But the
Collator object is already available in the territory sensitive character classes and we
do not have to go to database context to get it. I changed the code to use that collator
object rather than look into database context. The reason for null pointer exception was
that database context was not loaded yet during database recovery.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@610846 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/SQLChar.java",
"hunks": [
{
"added": [
"\t/**",
"\t * This method gets called for the collation sensitive char classes ie",
"\t * CollatorSQLChar, CollatorSQLVarchar, CollatorSQLLongvarchar,",
"\t * CollatorSQLClob. These collation sensitive chars need to have the ",
"\t * collation key in order to do string comparison. And the collation key",
"\t * is obtained using the Collator object that these classes already have.",
"\t * ",
"\t * @return CollationKey obtained using Collator on the string",
"\t * @throws StandardException",
"\t */"
],
"header": "@@ -2492,6 +2492,16 @@ readingLoop:",
"removed": []
},
{
"added": [
"\t\tRuleBasedCollator rbc = getCollatorForCollation();\t\t"
],
"header": "@@ -2513,7 +2523,7 @@ readingLoop:",
"removed": [
"\t\tRuleBasedCollator rbc = getLocaleFinder().getCollator();\t\t"
]
},
{
"added": [
"\tprotected RuleBasedCollator getCollatorForCollation() throws StandardException",
"\t{",
"\t\treturn getLocaleFinder().getCollator();",
"\t}",
""
],
"header": "@@ -2748,6 +2758,11 @@ readingLoop:",
"removed": []
}
]
}
] |
derby-DERBY-3303-d2a6cce8
|
DERBY-3303: Fix OrderByColumn.java to account for "pulled" GROUP BY columns
when a) checking to see if the user has specified a valid ORDER BY column,
and b) trying to resolve "pulled" ORDER BY columns to their underlying values.
Also, add some relevant test cases to the existing lang/orderby.sql test.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628823 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/OrderByColumn.java",
"hunks": [
{
"added": [
"",
"\t\t\t/* Column is out of range if either a) resultCol is null, OR",
"\t\t\t * b) resultCol points to a column that is not visible to the",
"\t\t\t * user (i.e. it was generated internally).",
"\t\t\t */",
"\t\t\tif ((resultCol == null) ||",
"\t\t\t\t(resultCol.getColumnPosition() > targetCols.visibleSize()))",
"\t\t\t{"
],
"header": "@@ -196,8 +196,14 @@ public class OrderByColumn extends OrderedColumn {",
"removed": [
"\t\t\t",
"\t\t\tif (resultCol == null) {"
]
}
]
}
] |
derby-DERBY-3304-002bf942
|
DERBY-3304 and DERBY-3414
This serves as a test case for both the jira entries above.
Number of code changes for transaction ending time went in as part of DERBY-3304 and this
new test will check those code changes for specific case of rollback inside a java procedure
call.
The test case is currently disabled for network server because the rollback inside the
procedure is not closing all the resultsets and DERBY-3414 is to track this behavior of
network server. Once DERBY-3414 is fixed, we should enable the test for network server.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@627673 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3304-039b8cdf
|
Adding a junit test for the standalone test case provided by Dag for DERBY-3304. Here, we
are adding a Java procedure which does a commit and then returns a resultset back to the
caller. The resultset should not get closed as part of the commit.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@614292 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3304-0f450705
|
DERBY-3304
DERBY-3037
DERBY-1585
I am adding a test case to check for the resultset from the java procedure call when the
java procedure has done a rollback inside it. This test shows that in trunk, after checkin
602991 for DERBY-1585, a procedure does not return a resultset if there was a rollbck
inside the procedure with resultset creation before rollback. This behavior is different
than what happens in 10.2 codeline. In 10.2, a procedure will return a *closed* resultset
if there was a rollback inside the procedure. But a procedure should not return closed
result sets, so it appears that trunk is behaving correctly and 10.2's behavior was
incorrect.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628130 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/JDBC.java",
"hunks": [
{
"added": [
" * Assert that the statement has no more results(getMoreResults) and it ",
" * indeed does not return any resultsets(by checking getResultSet). ",
" * Also, ensure that update count is -1.",
" \tAssert.assertFalse(s.getMoreResults());",
" Assert.assertTrue(s.getUpdateCount() == -1);",
" Assert.assertNull(s.getResultSet());"
],
"header": "@@ -523,15 +523,17 @@ public class JDBC {",
"removed": [
" * Assert that the statement has no more results. Logic taken",
" * from javadoc for java.sql.Statement.getMoreResults.",
" Assert.assertTrue((s.getMoreResults() == false)",
" && (s.getUpdateCount() == -1));"
]
}
]
}
] |
derby-DERBY-3304-27711132
|
DERBY-3304
Some code cleanup in GenericLanguageConnectionContext.endTransactionActivationHandling so the code is more readable.
No functionality change, just consolidated various if statements and used some local variables to replace repeated
method calls like a.getResultSet() and a.getResultSet().returnsRows()
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@619772 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java",
"hunks": [
{
"added": [
"\t\t\tif (forRollback) { ",
"\t\t\t\t// Only invalidate statements if we performed DDL.",
"\t\t\t\tif (dataDictionaryInWriteMode()) {",
"\t\t\t\t\tExecPreparedStatement ps = a.getPreparedStatement();",
"\t\t\t\t\tif (ps != null) {",
"\t\t\t\t\t\tps.makeInvalid(DependencyManager.ROLLBACK, this);",
"\t\t\t\t\t}",
"\t\t\t\t}",
"\t\t\t} else {",
"\t\t\t\t\tResultSet activationResultSet = a.getResultSet();",
"\t\t\t\t\tboolean resultsetReturnsRows = activationResultSet.returnsRows();",
"\t\t\t\t\tif (resultsetReturnsRows){",
"\t\t\t\t\t\tif (a.getResultSetHoldability() == false)",
"\t\t\t\t\t\t\t//Close result sets that return rows and are not held ",
"\t\t\t\t\t\t\t//across commit. This is to implement closing JDBC ",
"\t\t\t\t\t\t\t//result sets that are CLOSE_CURSOR_ON_COMMIT at commit ",
"\t\t\t\t\t\t\t//time. ",
"\t\t\t\t\t\t\tactivationResultSet.close();",
"\t\t\t\t\t\telse ",
"\t\t\t\t\t\t\t//Clear the current row of the result sets that return",
"\t\t\t\t\t\t\t//rows and are held across commit. This is to implement",
"\t\t\t\t\t\t\t//keeping JDBC result sets open that are ",
"\t\t\t\t\t\t\t//HOLD_CURSORS_OVER_COMMIT at commit time and marking",
"\t\t\t\t\t\t\t//the resultset to be not on a valid row position. The ",
"\t\t\t\t\t\t\t//user will need to reposition within the resultset ",
"\t\t\t\t\t\t\t//before doing any row operations.",
"\t\t\t\t\t\t\tactivationResultSet.clearCurrentRow();\t\t\t\t\t\t\t"
],
"header": "@@ -2744,43 +2744,45 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\tif (forRollback) ",
"\t\t\telse {",
"\t\t\t\t\tif ((a.getResultSetHoldability() == false && a.getResultSet().returnsRows()==true)){",
"\t\t\t\t\t\t//Close result sets that return rows and are not held ",
"\t\t\t\t\t\t//across commit. This is to implement closing JDBC ",
"\t\t\t\t\t\t//result sets that are CLOSE_CURSOR_ON_COMMIT at commit ",
"\t\t\t\t\t\t//time. ",
"\t\t\t\t\t\ta.getResultSet().close();",
"\t\t\t\t\t} else if (a.getResultSet().returnsRows()) {",
"\t\t\t\t\t\t//Clear the current row of the result sets that return",
"\t\t\t\t\t\t//rows and are held across commit. This is to implement",
"\t\t\t\t\t\t//keeping JDBC result sets open that are ",
"\t\t\t\t\t\t//HOLD_CURSORS_OVER_COMMIT at commit time and marking",
"\t\t\t\t\t\t//the resultset to be not on a valid row position. The ",
"\t\t\t\t\t\t//user will need to reposition within the resultset ",
"\t\t\t\t\t\t//before doing any row operations.",
"\t\t\t\t\t\ta.getResultSet().clearCurrentRow();",
"",
"\t\t\t// Only invalidate statements if we performed DDL.",
"\t\t\tif (forRollback && dataDictionaryInWriteMode()) {",
"\t\t\t\tExecPreparedStatement ps = a.getPreparedStatement();",
"\t\t\t\tif (ps != null) {",
"\t\t\t\t\tps.makeInvalid(DependencyManager.ROLLBACK, this);",
"\t\t\t\t}",
"\t\t\t}"
]
}
]
}
] |
derby-DERBY-3304-782354ac
|
DERBY-3304
The main purpose of this patch is to fix the rollback handling for resultsets that do not
return rows. An example case for this is a java procedure which has a Connection.rollback
inside it. When the user calls the java procedure, and Connection.rollback is made inside
of it, Derby should not be closing the resultset assoicated with the java procedure call
(that resultset is a CallStatementResultSet). In other words, a user initiated rollback
through JDBC Connection object should leave the resultsets that do not return rows open. In
order to implement this, I had to make changes to ignore resultsets that do not return rows
in
GenericLanguageConnectionContext.endTransactionActivationHandling. As a result of this
change, for the eg case given above, the activation assoicated with the java procedure
will not be reset (which also means that, CallStatementResultSet.close will not be called)
inside GenericLanguageConnectionContext.endTransactionActivationHandling.
But the code inside CallStatementResultset.close() took care of the closed dynamic resultset
and it took out the closed dynamic resultset from the list of resultsets that would be
available to user through the Statement.getResultSet api. With my changes through this
patch, we are going to skip the CallStatementResultset.close during
GenericLanguageConnectionContext.endTransactionActivationHandling which means that we have
to deal with those closed dynamic resultsets on our own. I did that part of logic
changes in EmbedStatement.processDynamicResults
EmbedStatement.processDynamicResults used to check if the JDBC Resultset is closed by
directly checking the field isClosed in the EmbedResultSet. But it is not sufficient to
check only JDBC Resultset. We need to check the underlying language Resultset too to
determine if the dynamic resultset is closed. There is no direct api on EmbedResultset
which will return a boolean after checking the JDBC Resultset and language Resulset. Instead,
there is a method called EmbedResultSet.checkIfClosed. This method will throw an exception
if it finds the JDBC ResultSet or language ResultSet closed. So, my changes in
EmbedStatement.processDynamicResults make a call to EmbedResultSet.checkIfClosed and if
there is an exception thrown, then we know that the resultset is closed and hence we should
move to the next resultset.
In addition to these code changes, I have added a new test to LangProcedureTest. The new
java procedure is defined to return 2 dynamic resultsets. One of these resultsets is
created before Connection.rollback inside the java procedure. The other dynamic resultset
is created after Connection.rollback. As as result of Connection.rollback, the first
dynamic resultset will be closed but the second one will remain open. The test makes sure
that only one dynamic resultset is returned by the java procedure.
Also, made one minor change in LangProcedureTest for an existing test. The test at line 804
was getting a resultset from the Statement object without asserting that there are no more
resultsets. The resultset object would have been null anyways in this test because there
are no open resulsets from the Java procedure. Because of this, I took out the redundant
code of getting null resultset object from Statement using getResultset,
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629926 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java",
"hunks": [
{
"added": [
"\t\tthe activations that have resultset returning rows associated with ",
"\t\tthem. DERBY-3304 Resultsets that do not return rows should be left ",
"\t\talone when the rollback is through the JDBC Connection object. If the ",
"\t\trollback is caused by an exception, then at that time, all kinds of",
"\t\tresultsets should be closed. "
],
"header": "@@ -2699,7 +2699,11 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\tthe activations. "
]
},
{
"added": [
"\t\t\tResultSet activationResultSet = null;",
"\t\t\tboolean resultsetReturnsRows = false;",
"\t\t\tif (a.getResultSet() != null) {",
"\t\t\t\tactivationResultSet = a.getResultSet();",
"\t\t\t\tresultsetReturnsRows = activationResultSet.returnsRows();",
"\t\t\t}",
"",
"\t\t\t\tif (activationResultSet != null) ",
"\t\t\t\t\tif (resultsetReturnsRows)",
"\t\t\t\t\t\t//Since we are dealing with rollback, we need to reset ",
"\t\t\t\t\t\t//the activation no matter what the holdability might ",
"\t\t\t\t\t\t//be provided that resultset returns rows. An example",
"\t\t\t\t\t\t//where we do not want to close a resultset that does",
"\t\t\t\t\t\t//not return rows would be a java procedure which has",
"\t\t\t\t\t\t//user invoked rollback inside of it. That rollback",
"\t\t\t\t\t\t//should not reset the activation associated with",
"\t\t\t\t\t\t//the call to java procedure because that activation",
"\t\t\t\t\t\t//is still being used.",
"\t\t\t\t\t\ta.reset();"
],
"header": "@@ -2738,11 +2742,26 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\t\t//Since we are dealing with rollback, we need to reset the ",
"\t\t\t\t//activation no matter what the holdability might be or no",
"\t\t\t\t//matter whether the associated resultset returns rows or not.",
"\t\t\t\ta.reset();"
]
}
]
}
] |
derby-DERBY-3304-9f9a13ab
|
DERBY-3304
When a SQL exception is thrown, make sure that rollback caused by it closes all the resultsets
irrespective of whether they return rows or not. This cleanup was not happening for
CallableStatementResultSet. To fix this, in CallableStatementResultSet class, I have changed
the no-op cleanup() method to call close(). Without this, the locks held by the resultsets
created inside the Java procedure method were not getting released.
I have added a test case to make sure that this code change is tested. I have created a
Java procedure which creates a dynamic resultset, a local resultset and then does an
insert which will cause duplicate key exception. As part of rollback for exception, Derby
closes the dynamic resultset and local resultset along with the CallableStatementResultset.
And the test case is able to drop the tables which were used by the dynamic and local
resultset without running into locking issues.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@631108 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3304-a68377ce
|
Checking in code cleanup for DERBY-3304. This code cleanup is based on Knut's review of my
earlier commit 629926. No functionality has changed, but code will be now much easier to
read.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@631481 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java",
"hunks": [
{
"added": [
"\t\t\t//Determine if the activation has a resultset and if that resultset",
"\t\t\t//returns rows. For such an activation, we need to take special",
"\t\t\t//actions during commit and rollback as explained in the comments",
"\t\t\t//below.",
"\t\t\tResultSet activationResultSet = a.getResultSet();",
"\t\t\tboolean resultsetReturnsRows = ",
"\t\t\t\t(activationResultSet != null) && activationResultSet.returnsRows(); ;",
"\t\t\t\tif (resultsetReturnsRows)",
"\t\t\t\t\t//Since we are dealing with rollback, we need to reset ",
"\t\t\t\t\t//the activation no matter what the holdability might ",
"\t\t\t\t\t//be provided that resultset returns rows. An example",
"\t\t\t\t\t//where we do not want to close a resultset that does",
"\t\t\t\t\t//not return rows would be a java procedure which has",
"\t\t\t\t\t//user invoked rollback inside of it. That rollback",
"\t\t\t\t\t//should not reset the activation associated with",
"\t\t\t\t\t//the call to java procedure because that activation",
"\t\t\t\t\t//is still being used.",
"\t\t\t\t\ta.reset();"
],
"header": "@@ -2742,26 +2742,26 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\tResultSet activationResultSet = null;",
"\t\t\tboolean resultsetReturnsRows = false;",
"\t\t\tif (a.getResultSet() != null) {",
"\t\t\t\tactivationResultSet = a.getResultSet();",
"\t\t\t\tresultsetReturnsRows = activationResultSet.returnsRows();",
"\t\t\t}",
"\t\t\t\tif (activationResultSet != null) ",
"\t\t\t\t\tif (resultsetReturnsRows)",
"\t\t\t\t\t\t//Since we are dealing with rollback, we need to reset ",
"\t\t\t\t\t\t//the activation no matter what the holdability might ",
"\t\t\t\t\t\t//be provided that resultset returns rows. An example",
"\t\t\t\t\t\t//where we do not want to close a resultset that does",
"\t\t\t\t\t\t//not return rows would be a java procedure which has",
"\t\t\t\t\t\t//user invoked rollback inside of it. That rollback",
"\t\t\t\t\t\t//should not reset the activation associated with",
"\t\t\t\t\t\t//the call to java procedure because that activation",
"\t\t\t\t\t\t//is still being used.",
"\t\t\t\t\t\ta.reset();"
]
},
{
"added": [
"\t\t\t\tif (resultsetReturnsRows){",
"\t\t\t\t\tif (a.getResultSetHoldability() == false)",
"\t\t\t\t\t\t//Close result sets that return rows and are not held ",
"\t\t\t\t\t\t//across commit. This is to implement closing JDBC ",
"\t\t\t\t\t\t//result sets that are CLOSE_CURSOR_ON_COMMIT at commit ",
"\t\t\t\t\t\t//time. ",
"\t\t\t\t\t\tactivationResultSet.close();",
"\t\t\t\t\telse ",
"\t\t\t\t\t\t//Clear the current row of the result sets that return",
"\t\t\t\t\t\t//rows and are held across commit. This is to implement",
"\t\t\t\t\t\t//keeping JDBC result sets open that are ",
"\t\t\t\t\t\t//HOLD_CURSORS_OVER_COMMIT at commit time and marking",
"\t\t\t\t\t\t//the resultset to be not on a valid row position. The ",
"\t\t\t\t\t\t//user will need to reposition within the resultset ",
"\t\t\t\t\t\t//before doing any row operations.",
"\t\t\t\t\t\tactivationResultSet.clearCurrentRow();\t\t\t\t\t\t\t"
],
"header": "@@ -2771,26 +2771,22 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\t\tif (activationResultSet != null) {",
"\t\t\t\t\t//if the activation has resultset associated with it, then ",
"\t\t\t\t\t//use following criteria to take the action",
"\t\t\t\t\tif (resultsetReturnsRows){",
"\t\t\t\t\t\tif (a.getResultSetHoldability() == false)",
"\t\t\t\t\t\t\t//Close result sets that return rows and are not held ",
"\t\t\t\t\t\t\t//across commit. This is to implement closing JDBC ",
"\t\t\t\t\t\t\t//result sets that are CLOSE_CURSOR_ON_COMMIT at commit ",
"\t\t\t\t\t\t\t//time. ",
"\t\t\t\t\t\t\tactivationResultSet.close();",
"\t\t\t\t\t\telse ",
"\t\t\t\t\t\t\t//Clear the current row of the result sets that return",
"\t\t\t\t\t\t\t//rows and are held across commit. This is to implement",
"\t\t\t\t\t\t\t//keeping JDBC result sets open that are ",
"\t\t\t\t\t\t\t//HOLD_CURSORS_OVER_COMMIT at commit time and marking",
"\t\t\t\t\t\t\t//the resultset to be not on a valid row position. The ",
"\t\t\t\t\t\t\t//user will need to reposition within the resultset ",
"\t\t\t\t\t\t\t//before doing any row operations.",
"\t\t\t\t\t\t\tactivationResultSet.clearCurrentRow();\t\t\t\t\t\t\t",
"\t\t\t\t\t}"
]
}
]
}
] |
derby-DERBY-3304-ac5abd96
|
DERBY-6038: Intermittent failure in LangProcedureTest: cannot drop table because of open ResultSet
Explicitly close the non-dynamic result set in the regression test case
for DERBY-3304. Also update the comments to reflect that DERBY-3304 only
affected the internal CallStatementResultSet and its dynamic result sets.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1431945 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3304-cbc650d0
|
DERBY-3304
This commit addresses two issues.
First of all, it cleanups up reset method in BaseActivation which was doing more than just bringing the Activation back
to pre-execution state. The method had to make itself aware of holdability and what
kind of resultset it was dealing with
before closing or clearing the row of the resultset. The reason for this behavior
is commit code path was relying on
Activation.reset to do more than just bringing the activation to pre-execution state.
I fixed this by moving this code
from BaseActivation.reset to GenericLanguageConnectionContext.resetActivations.
Additionally, in the new code in GenericLanguageConnectionContext.resetActivations, I added the code to not close the
language result sets associated with activations that do not return rows even if activation may have holdability set to
false. This will ensure that a commit inside a java procedure will not inadvertantly close the resultset associated with
the java procedure call.
Additionally, I copied some of the cleanup work(as shown below) from BaseActivation.reset into
new code in GenericLanguageConnectionContext.resetActivations
a.clearHeapConglomerateController();
if (!a.isSingleExecution())
a.clearWarnings();
This code above was always getting executed at the time of commit before my commit and because of that, I decided to copy
it in GenericLanguageConnectionContext.resetActivations. If anyone has any comments on this, please let me know.
(Andrew trying to change commit message for Mamta)
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618788 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/BaseActivation.java",
"hunks": [
{
"added": [
"\t\tif (resultSet != null) ",
"\t\t\tresultSet.close();",
"\t\t"
],
"header": "@@ -332,16 +332,9 @@ public abstract class BaseActivation implements CursorActivation, GeneratedByteC",
"removed": [
"\t\t// if resultset holdability after commit is false, close it",
"\t\tif (resultSet != null) {",
"\t\t\tif (!resultSetHoldability || !resultSet.returnsRows()) {\t\t\t",
"\t\t\t\t// would really like to check if it is open,",
"\t\t\t\t// this is as close as we can approximate that.",
"\t\t\t\tresultSet.close();",
"\t\t\t} else if (resultSet.returnsRows()) {",
"\t\t\t\tresultSet.clearCurrentRow();",
"\t\t\t}",
"\t\t}"
]
}
]
}
] |
derby-DERBY-3304-ff175071
|
DERBY-3304
This is a followup commit for DERBY-3304 based on various comments. It does following
1)The existing method resetActivations in GenericLanguageConnectionContext has been renamed to better reflect it's
functionality. It will be now called endTransactionActivationHandling since it gets called for commit/rollback.
2)The javadoc comments for resetActivations(now called endTransactionActivationHandling) were not valid. Fixed that in
this commit.
3)Took out the redundant code about setting the holdability to false if we were in rollback. It was needed earlier
because the method that took care of activations at rollback time needed to check the holdability. That method
(BaseActivation.reset) does not check holdability anymore and hence we do not need to set the activations to false
holdability when we are dealing with rollback.
4)Lastly, JDBC api for Connection.commit does not ask for clearing of warnings and hence we should not have code to
clear the warnings at the time of commit. I removed the warning clearing code from resetActivations(now called
endTransactionActivationHandling) in GenericLanguageConnectionContext.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@619279 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java",
"hunks": [
{
"added": [
"\t\tendTransactionActivationHandling(false);"
],
"header": "@@ -1122,7 +1122,7 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\tresetActivations(false);"
]
},
{
"added": [
"\t\tendTransactionActivationHandling(true);"
],
"header": "@@ -1366,7 +1366,7 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\tresetActivations(true);"
]
},
{
"added": [
"\t\t\t\tendTransactionActivationHandling(true);"
],
"header": "@@ -1447,7 +1447,7 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\t\tresetActivations(true);"
]
},
{
"added": [
"\t\tIf we are called as part of rollback code path, then we will reset all ",
"\t\tthe activations. ",
"\t\t",
"\t\tIf we are called as part of commit code path, then we will do one of ",
"\t\tthe following if the activation has resultset assoicated with it. Also,",
"\t\twe will clear the conglomerate used while scanning for update/delete",
"\t\t1)Close result sets that return rows and are not held across commit.",
"\t\t2)Clear the current row of the resultsets that return rows and are",
"\t\theld across commit.",
"\t\t3)Leave the result sets untouched if they do not return rows",
"\t\t",
"\t\tAdditionally, clean up (close()) activations that have been",
"\tprivate void endTransactionActivationHandling(boolean forRollback) throws StandardException {"
],
"header": "@@ -2703,15 +2703,24 @@ public class GenericLanguageConnectionContext",
"removed": [
"",
"\t\tresets all open activations, to close their result sets.",
"\t\tAlso cleans up (close()) activations that have been",
"\tprivate void resetActivations(boolean andClose) throws StandardException {"
]
},
{
"added": [],
"header": "@@ -2724,15 +2733,6 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\t/*",
"\t\t\t** andClose true means we are here for rollback.",
"\t\t\t** In case of rollback, we don't care for holding",
"\t\t\t** cursors and that's why I am resetting holdability",
"\t\t\t** to false for all activations just before rollback",
"\t\t\t*/\t",
"\t\t\tif (andClose)",
"\t\t\t\ta.setResultSetHoldability(false);",
""
]
},
{
"added": [
"\t\t\tif (forRollback) "
],
"header": "@@ -2744,7 +2744,7 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\tif (andClose) "
]
},
{
"added": [
"\t\t\tif (forRollback && dataDictionaryInWriteMode()) {"
],
"header": "@@ -2772,12 +2772,10 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t\t\tif (!a.isSingleExecution())",
"\t\t\t\t\ta.clearWarnings();",
"\t\t\tif (andClose && dataDictionaryInWriteMode()) {"
]
}
]
}
] |
derby-DERBY-3305-f62cb688
|
DERBY-3305 Ensure that a processing a ResultSet into a dynamic result marks the application statment result set as the correct one, based upon the statement the result set is transferred to. Add asserts to jdbcapi.ProcedureTest to check that ResultSet.getStatement returns the correct Statement object.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@610238 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3306-181c56ab
|
DERBY-3325: Add 'maxStatements' property to ClientConnectionPoolDataSource.
Enabled the test jdbcapi.ClientConnectionPoolDataSourceTest, removed two unused imports from _Suite and removed the workaround for DERBY-3306.
Patch file: derby-3325-2a-enable_test_and_remove_workaround.diff
(I added one missing space character to the diff)
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628102 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3306-7fe51c32
|
DERBY-3306 (+DERBY-3412): jdbc4.StatementEventsTest cannot be run individually in a clean environment.
This commit backs out the previous patch committed for this issue (revision 620480). Manual work was required because J2EEDataSourceTest has been split off DataSourceTest in the mean time.
This patch also fixes StatementEventsTest by setting the createDatabase to create.
Patch file: derby-3306-2a-backout_and_alternative_fix.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629894 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/JDBCDataSource.java",
"hunks": [
{
"added": [
" * configuration. The getConnection() method will return",
" * a connection identical to TestConfiguration.openDefaultConnection()."
],
"header": "@@ -37,9 +37,8 @@ public class JDBCDataSource {",
"removed": [
" * configuration.",
" * <p>The getConnection() method will return a connection identical to",
" * {@link TestConfiguration#openDefaultConnection}."
]
},
{
"added": [],
"header": "@@ -85,10 +84,6 @@ public class JDBCDataSource {",
"removed": [
" * <p>",
" * If no properties are passed in, defaults are obtained from the",
" * current <code>TestConfiguration</code> and the data source will be",
" * configured to create the specified database if it does not exist."
]
},
{
"added": [],
"header": "@@ -101,10 +96,6 @@ public class JDBCDataSource {",
"removed": [
" * <p>",
" * If no properties are passed in, defaults are obtained from the",
" * current <code>TestConfiguration</code> and the data source will be",
" * configured to create the specified database if it does not exist."
]
},
{
"added": [],
"header": "@@ -132,8 +123,6 @@ public class JDBCDataSource {",
"removed": [
" // By default non-existing databases will be created.",
" beanProperties.put(\"createDatabase\", \"create\");"
]
}
]
}
] |
derby-DERBY-3306-9cf3fb5e
|
DERBY-3306: jdbc4.StatementEventsTest cannot be run individually in a clean environment. This is in fact a more general fix for tests using a DataSource obtained through the JUnit framework utility classes to create connections. The connections will now default to create the database if it does not exists. The accompanying test changes were required to get a clean suites.All run, and falls into three categories; test doesn't want to create db, asserting on zero warnings or uses connection attributes that conflict with db creation.
Patch file: derby-3306-1c-create_db_by_default_and_test_fixes.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@620480 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/JDBCDataSource.java",
"hunks": [
{
"added": [
" * configuration.",
" * <p>The getConnection() method will return a connection identical to",
" * {@link TestConfiguration#openDefaultConnection}."
],
"header": "@@ -37,8 +37,9 @@ public class JDBCDataSource {",
"removed": [
" * configuration. The getConnection() method will return",
" * a connection identical to TestConfiguration.openDefaultConnection()."
]
},
{
"added": [
" * <p>",
" * If no properties are passed in, defaults are obtained from the",
" * current <code>TestConfiguration</code> and the data source will be",
" * configured to create the specified database if it does not exist."
],
"header": "@@ -84,6 +85,10 @@ public class JDBCDataSource {",
"removed": []
},
{
"added": [
" * <p>",
" * If no properties are passed in, defaults are obtained from the",
" * current <code>TestConfiguration</code> and the data source will be",
" * configured to create the specified database if it does not exist."
],
"header": "@@ -96,6 +101,10 @@ public class JDBCDataSource {",
"removed": []
},
{
"added": [
" // By default non-existing databases will be created.",
" beanProperties.put(\"createDatabase\", \"create\");"
],
"header": "@@ -123,6 +132,8 @@ public class JDBCDataSource {",
"removed": []
}
]
}
] |
derby-DERBY-3307-2e2d2ee2
|
DERBY-3307: NPE in PooledConnction event notification handling if a null listener is added
- Added null check in ClientPooledConnection.addConnectionEventListener().
- Added regression test in J2EEDataSourceTest
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@666040 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3308-e5b0435f
|
DERBY-3308: Broken synchronization for event handling in ClientPooledConnection40.
Patch file: derby-3308-1b-eventlisteners_synchronization.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@614536 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/ClientPooledConnection40.java",
"hunks": [
{
"added": [
"import java.util.ArrayList;"
],
"header": "@@ -23,7 +23,7 @@ package org.apache.derby.client;",
"removed": [
"import java.util.Vector;"
]
},
{
"added": [
" ",
" /** List of statement event listeners. */",
" //@GuardedBy(\"this\")",
" private final ArrayList<StatementEventListener> statementEventListeners = ",
" new ArrayList<StatementEventListener>();"
],
"header": "@@ -37,9 +37,11 @@ import org.apache.derby.jdbc.ClientBaseDataSource;",
"removed": [
" //using generics to avoid casting problems",
" protected final Vector<StatementEventListener> statementEventListeners = ",
" new Vector<StatementEventListener>();"
]
},
{
"added": [
" public synchronized void addStatementEventListener(StatementEventListener listener){",
" statementEventListeners.add(listener);"
],
"header": "@@ -71,11 +73,11 @@ public class ClientPooledConnection40 extends ClientPooledConnection {",
"removed": [
" public void addStatementEventListener(StatementEventListener listener){",
" statementEventListeners.addElement(listener);"
]
},
{
"added": [
" public synchronized void removeStatementEventListener(StatementEventListener listener){",
" statementEventListeners.remove(listener);"
],
"header": "@@ -87,11 +89,11 @@ public class ClientPooledConnection40 extends ClientPooledConnection {",
"removed": [
" public void removeStatementEventListener(StatementEventListener listener){",
" statementEventListeners.removeElement(listener);"
]
},
{
"added": [
" public synchronized void onStatementClose(PreparedStatement statement) {",
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);"
],
"header": "@@ -102,15 +104,11 @@ public class ClientPooledConnection40 extends ClientPooledConnection {",
"removed": [
" public void onStatementClose(PreparedStatement statement) {",
" //synchronized block on statementEventListeners to make it thread",
" //safe",
" synchronized(statementEventListeners) {",
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);",
" }"
]
},
{
"added": [
" public synchronized void onStatementErrorOccurred(",
" PreparedStatement statement,",
" SQLException sqle) {",
" for (StatementEventListener l : statementEventListeners) {",
" l.statementErrorOccurred(event);"
],
"header": "@@ -125,16 +123,13 @@ public class ClientPooledConnection40 extends ClientPooledConnection {",
"removed": [
" public void onStatementErrorOccurred(PreparedStatement statement,",
" SQLException sqle) {",
" //synchronized block on statementEventListeners to make it thread",
" //safe",
" synchronized(statementEventListeners) {",
" for (StatementEventListener l : statementEventListeners) {",
" l.statementErrorOccurred(event);",
" }"
]
}
]
}
] |
derby-DERBY-3310-0dfa31cd
|
Cleanup related to DERBY-3310. Simplify ResultSetNode.genNormalizeResultSetNode to always work on itself by producing a NormalizeResultSetNode that wraps it. That is how the function worked and was imlemented.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@630643 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/ResultSetNode.java",
"hunks": [
{
"added": [
" ResultColumnList prRCList = resultColumns;"
],
"header": "@@ -821,12 +821,10 @@ public abstract class ResultSetNode extends QueryTreeNode",
"removed": [
"\t\tResultColumnList\tprRCList;",
"",
"\t\tprRCList = resultColumns;"
]
},
{
"added": [
" ResultColumnList prRCList = resultColumns;"
],
"header": "@@ -1344,12 +1342,10 @@ public abstract class ResultSetNode extends QueryTreeNode",
"removed": [
"\t\tResultColumnList\tprRCList;",
"",
"\t\tprRCList = resultColumns;"
]
},
{
"added": [
"\t * Put a NormalizeResultSetNode on top of this ResultSetNode."
],
"header": "@@ -1401,7 +1397,7 @@ public abstract class ResultSetNode extends QueryTreeNode",
"removed": [
"\t * Put a NormalizeResultSetNode on top of the specified ResultSetNode."
]
},
{
"added": [],
"header": "@@ -1418,7 +1414,6 @@ public abstract class ResultSetNode extends QueryTreeNode",
"removed": [
"\t * @param normalizeChild\tChild result set for new NRSN."
]
},
{
"added": [
"\tNormalizeResultSetNode ",
"\t\tgenNormalizeResultSetNode(boolean forUpdate)",
" ResultColumnList prRCList = resultColumns;"
],
"header": "@@ -1428,18 +1423,14 @@ public abstract class ResultSetNode extends QueryTreeNode",
"removed": [
"\tpublic NormalizeResultSetNode ",
"\t\tgenNormalizeResultSetNode(ResultSetNode\tnormalizeChild, ",
"\t\t\t\t\t\t\t\t boolean forUpdate)",
"\t\tNormalizeResultSetNode\tnrsn;",
"\t\tResultColumnList\t\tprRCList;",
"",
"\t\tprRCList = resultColumns;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/UpdateNode.java",
"hunks": [
{
"added": [
"\t\t\tresultSet = resultSet.genNormalizeResultSetNode(true);"
],
"header": "@@ -548,7 +548,7 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": [
"\t\t\tresultSet = resultSet.genNormalizeResultSetNode(resultSet, true);"
]
}
]
}
] |
derby-DERBY-3310-2e105f0d
|
Add some comments to ResultColumn related to investigations for DERBY-3310 explaining some more about how the column can have a different type to the expression it refers to.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@630645 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumn.java",
"hunks": [
{
"added": [
" * <P>",
" * its underlying column is not. In an INSERT or UPDATE the ResultColumn",
" * will represent the type of the column in the table, the type of",
" * the underlying expresion will be the type of the source of the",
" * value to be insert or updated. The method columnTypeAndLengthMatch()",
" * can be used to detect when normalization is required between",
" * the expression and the tyoe of ResultColumn. This class does",
" * not implement any type normalization (conversion), this is",
" * typically handled by a NormalizeResultSetNode."
],
"header": "@@ -50,10 +50,17 @@ import org.apache.derby.iapi.util.StringUtil;",
"removed": [
" * ",
" * its underlying column is not."
]
},
{
"added": [
"\t/**"
],
"header": "@@ -911,7 +918,7 @@ public class ResultColumn extends ValueNode",
"removed": [
"\t/*"
]
},
{
"added": [],
"header": "@@ -921,7 +928,6 @@ public class ResultColumn extends ValueNode",
"removed": [
""
]
},
{
"added": [],
"header": "@@ -948,9 +954,6 @@ public class ResultColumn extends ValueNode",
"removed": [
" if (expressionType == null)",
" System.out.println(getExpression().getClass());",
" "
]
}
]
}
] |
derby-DERBY-3310-c6564415
|
DERBY-3597 Incorporate DERBY-3310 and DERBY-3494 write-ups into NormalizeResultSetNode code comments.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@645638 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/NormalizeResultSetNode.java",
"hunks": [
{
"added": [
" *",
" * child result set that needs one. See non-javadoc comments for ",
" * a walk-through of a couple sample code paths.",
" */",
"",
" /*",
" * Below are a couple of sample code paths for NormlizeResultSetNodes.",
" * These samples were derived from Army Brown's write-ups attached to DERBY-3310",
" * and DERBY-3494. The text was changed to include the new code path now that ",
" * all of the NormalizeResultSetNode code has been moved into the init() method.",
" * There are two sections of code in NormalizeResultSetNode.init() that are relevant:",
" * First the code to generate the new node based on the child result set. ",
" * We will call this \"normalize node creation\".",
" * ",
" * ResultSetNode rsn = (ResultSetNode) childResult;",
" * ResultColumnList rcl = rsn.getResultColumns();",
" * ResultColumnList targetRCL = (ResultColumnList) targetResultColumnList;",
" * ...",
" * ResultColumnList prRCList = rcl;",
" * rsn.setResultColumns(rcl.copyListAndObjects());",
" * ...",
" * this.resultColumns = prRCList;",
" *",
" * Next the code to adjust the types for the NormalizeResultSetNode. ",
" * We will call this \"type adjustment\"",
" * ",
" * if (targetResultColumnList != null) {",
" * int size = Math.min(targetRCL.size(), resultColumns.size());",
" * for (int index = 0; index < size; index++) {",
" * ResultColumn sourceRC = (ResultColumn) resultColumns.elementAt(index);",
" * ResultColumn resultColumn = (ResultColumn) targetRCL.elementAt(index);",
" * sourceRC.setType(resultColumn.getTypeServices());",
" * } ",
" * ",
" * --- Sample 1 : Type conversion from Decimal to BigInt on insert --- ",
" * (DERBY-3310 write-up variation) ",
" * The SQL statement on which this sample focuses is:",
" * ",
" * create table d3310 (x bigint);",
" * insert into d3310 select distinct * from (values 2.0, 2.1, 2.2) v; ",
" * ",
" * There are three compilation points of interest for this discussion:",
" * 1. Before the \"normalize node creation\"",
" * 2. Before the \"type adjustment\"",
" * 3. After the \"type adjustment\"",
" * ",
" * Upon completion of the \"type adjustment\", the compilation query ",
" * tree is then manipulated during optimization and code generation, the ",
" * latter of which ultimately determines how the execution-time ResultSet ",
" * tree is going to look.\\u00a0 So for this discussion we walk through the query",
" * tree as it exists at the various points of interest just described.",
" * ",
" * 1) To start, the (simplified) query tree that we have looks something like the following:",
" * ",
" * InsertNode",
" * (RCL_0:ResultColumn_0<BigInt>)",
" * |",
" * SelectNode",
" * (RCL_1:ResultColumn_1<Decimal>)",
" * |",
" * FromSubquery",
" * (RCL_2:ResultColumn_2<Decimal>)",
" * |",
" * UnionNode",
" * (RCL_3:ResultColumn_3<Decimal>)",
" * ",
" * Notation: In the above tree, node names with \"_x\" trailing them are used to",
" * distinguish Java Objects from each other. So if ResultColumn_0 shows up ",
" * more than once, then it is the *same* Java object showing up in different ",
" * parts of the query tree. Type names in angle brackets, such as \"<BigInt>\",",
" * describe the type of the entity immediately preceding the brackets. ",
" * So a line of the form:",
" * ",
" * RCL_0:ResultColumn_0<BigInt>",
" * ",
" * describes a ResultColumnList object containing one ResultColumn object ",
" * whose type is BIGINT. We can see from the above tree that, before ",
" * normalize node creation, the top of the compile tree contains an ",
" * InsertNode, a SelectNode, a FromSubquery, and a UnionNode, all of ",
" * which have different ResultColumnList objects and different ResultColumn ",
" * objects within those lists.",
" * ",
" * 2) After the normalize node creation",
" * The childresult passed to the init method of NormalizeResultSetNode is ",
" * the InsertNode's child, so it ends up creating a new NormalizeResultSetNode ",
" * and putting that node on top of the InsertNode's child--that is, on top of ",
" * the SelectNode.",
" *",
" * At this point it's worth noting that a NormalizeResultSetNode operates ",
" * based on two ResultColumnLists: a) its own (call it NRSN_RCL), and b) ",
" * the ResultColumnList of its child (call it NRSN_CHILD_RCL). More ",
" * specifically, during execution a NormalizeResultSet will take a row ",
" * whose column types match the types of NRSN_CHILD_RCL, and it will ",
" * \"normalize\" the values from that row so that they agree with the ",
" * types of NRSN_RCL. Thus is it possible--and in fact, it should generally ",
" * be the case--that the types of the columns in the NormalizeResultSetNode's ",
" * own ResultColumnList are *different* from the types of the columns in ",
" * its child's ResultColumnList. That should not be the case for most ",
" * (any?) other Derby result set.",
" * ",
" * So we now have:",
" *",
" * InsertNode",
" * (RCL_0:ResultColumn_0<BigInt>)",
" * |",
" * NormalizeResultSetNode",
" * (RCL_1:ResultColumn_1<Decimal> -> VirtualColumnNode<no_type> -> ResultColumn_4<Decimal>)",
" * |",
" * SelectNode",
" * (RCL_4:ResultColumn_4<Decimal>)",
" * |",
" * FromSubquery",
" * (RCL_2:ResultColumn_2<Decimal>)",
" * |",
" * UnionNode",
" * (RCL_3:ResultColumn_3<Decimal>)",
" *",
" * Notice how, when we generate the NormalizeResultSetNode, three things happen:",
" * ",
" * a) The ResultColumList object for the SelectNode is \"pulled up\" into the ",
" * NormalizeResultSetNode.",
" * b) SelectNode is given a new ResultColumnList--namely, a clone of its old",
" * ResultColumnList, including clones of the ResultColumn objects.",
" * c) VirtualColumnNodes are generated beneath NormalizeResultSetNode's ",
" * ResultColumns, and those VCNs point to the *SAME RESULT COLUMN OBJECTS* ",
" * that now sit in the SelectNode's new ResultColumnList. ",
" * Also note how the generated VirtualColumnNode has no type of its own; ",
" * since it is an instance of ValueNode it does have a dataTypeServices ",
" * field, but that field was not set when the NormalizeResultSetNode was ",
" * created. Hence \"<no_type>\" in the above tree.",
" * ",
" * And finally, note that at this point, NormalizeResultSetNode's ",
" * ResultColumnList has the same types as its child's ResultColumnList",
" * --so the NormalizeResultSetNode doesn't actually do anything ",
" * in its current form.",
" * ",
" * 3) Within the \"type adjustment\"",
" * ",
" * The purpose of the \"type adjustment\" is to take the types from ",
" * the InsertNode's ResultColumnList and \"push\" them down to the ",
" * NormalizeResultSetNode. It is this method which sets NRSN_RCL's types ",
" * to match the target (INSERT) table's types--and in doing so, makes them ",
" * different from NRSN_CHILD_RCL's types. Thus this is important because ",
" * without it, NormalizeResultSetNode would never change the types of the ",
" * values it receives.",
" * ",
" * That said, after the call to sourceRC.setType(...) we have:",
" *",
" * InsertNode",
" * (RCL0:ResultColumn_0<BigInt>)",
" * |",
" * NormalizeResultSetNode",
" * (RCL1:ResultColumn_1<BigInt> -> VirtualColumnNode_0<no_type> -> ResultColumn_4<Decimal>)",
" * |",
" * SelectNode",
" * (RCL4:ResultColumn_4<Decimal>)",
" * |",
" * FromSubquery",
" * (RCL2:ResultColumn_2<Decimal>)",
" * |",
" * UnionNode",
" * (RCL3:ResultColumn_3<Decimal>)",
" *",
" * The key change here is that ResultColumn_1 now has a type of BigInt ",
" * intead of Decimal. Since the SelectNode's ResultColumn, ResultColumn_4,",
" * still has a type of Decimal, the NormalizeResulSetNode will take as input",
" * a Decimal value (from SelectNode) and will output that value as a BigInt, ",
" * where output means pass the value further up the tree during execution ",
" * (see below).",
" * ",
" * Note before the fix for DERBY-3310, there was an additional type change ",
" * that caused problems with this case. ",
" * See the writeup attached to DERBY-3310 for details on why this was a problem. ",
" * ",
" * 4) After preprocessing and optimization:",
" * ",
" * After step 3 above, Derby will move on to the optimization phase, which ",
" * begins with preprocessing. During preprocessing the nodes in the tree ",
" * may change shape/content to reflect the needs of the optimizer and/or to ",
" * perform static optimizations/rewrites. In the case of our INSERT statement ",
" * the preprocessing does not change much:",
" *",
" * InsertNode",
" * (RCL0:ResultColumn_0<BigInt>)",
" * |",
" * NormalizeResultSetNode",
" * (RCL1:ResultColumn_1<BigInt> -> VirtualColumnNode<no_type> -> ResultColumn_4<Decimal>)",
" * |",
" * SelectNode",
" * (RCL4:ResultColumn_4<Decimal>)",
" * |",
" * ProjectRestrictNode_0",
" * (RCL2:ResultColumn_2<Decimal>)",
" * |",
" * UnionNode",
" * (RCL3:ResultColumn_3<Decimal>)",
" *",
" * The only thing that has changed between this tree and the one shown in ",
" * step 3 is that the FromSubquery has been replaced with a ProjectRestrictNode.",
" * Note that the ProjectRestrictNode has the same ResultColumnList object as ",
" * the FromSubquery, and the same ResultColumn object as well. That's worth ",
" * noting because it's another example of how Java objects can be \"moved\" ",
" * from one node to another during Derby compilation.",
" * ",
" * 5) After modification of access paths:",
" * As the final stage of optimization Derby will go through the modification ",
" * of access paths phase, in which the query tree is modified to prepare for ",
" * code generation. When we are done modifying access paths, our tree looks ",
" * something like this:",
"",
" InsertNode",
" (RCL0:ResultColumn_0<BigInt>)",
" |",
" NormalizeResultSetNode",
" (RCL1:ResultColumn_1<BigInt> -> VirtualColumnNode<no_type> -> ResultColumn_4<Decimal>)",
" |",
" DistinctNode",
" (RCL4:ResultColumn_4<Decimal> -> VirtualColumnNode<no_type> -> ResultColumn_5<Decimal>)",
" |",
" ProjectRestrictNode_1",
" (RCL5:ResultColumn_5<Decimal>)",
" |",
" ProjectRestrictNode_0",
" (RCL2:ResultColumn_2<Decimal>)",
" |",
" UnionNode",
" (RCL3:ResultColumn_3<Decimal>)",
"",
" * The key thing to note here is that the SelectNode has been replaced with two ",
" * new nodes: a ProjectRestrictNode whose ResultColumnList is a clone of the ",
" * SelectNode's ResultColumnList, and a DistinctNode, whose ResultColumnList ",
" * is the same object as the SelectNode's old ResultColumnList. More ",
" * specifically, all of the following occurred as part of modification of ",
" * access paths:",
" * ",
" * a) The SelectNode was replaced with ProjectRestrictNode_1, whose ",
" * ResultColumnList was the same object as the SelectNode's ResultColumnList.",
" *",
" * b) the ResultColumList object for ProjectRestrictNode_1 was pulled up ",
" * into a new DistinctNode.",
" *",
" * c) ProjectRestrictNode_1 was given a new ResultColumnList--namely, a ",
" * clone of its old ResultColumnList, including clones of the ResultColumn ",
" * objects.",
" * ",
" * d) VirtualColumnNodes were generated beneath the DistinctNode's ",
" * ResultColumns, and those VCNs point to the same result column objects ",
" * that now sit in ProjectRestrictNode_1's new ResultColumnList.",
" * ",
" * 6) After code generation:",
" *",
" * During code generation we will walk the compile-time query tree one final ",
" * time and, in doing so, we will generate code to build the execution-time ",
" * ResultSet tree. As part of that process the two ProjectRestrictNodes will ",
" * be skipped because they are both considered no-ops--i.e. they perform ",
" * neither projections nor restrictions, and hence are not needed. ",
" * (Note that, when checking to see if a ProjectRestrictNode is a no-op, ",
" * column types do *NOT* come into play.)",
" *",
" * Thus the execution tree that we generate ends up looking something like:",
" *",
" * InsertNode",
" * (RCL0:ResultColumn_0<BigInt>)",
" * |",
" * NormalizeResultSetNode",
" * (RCL1:ResultColumn_1<BigInt> -> VirtualColumnNode<no_type> -> ResultColumn_4<Decimal>)",
" * |",
" * DistinctNode",
" * (RCL4:ResultColumn_4<Decimal> -> VirtualColumnNode<no_type> -> ResultColumn_5<Decimal>)",
" * |",
" * ProjectRestrictNode_1",
" * (RCL5:ResultColumn_5<Decimal>)",
" * |",
" * ProjectRestrictNode_0",
" * (RCL2:ResultColumn_2<Decimal>)",
" * |",
" * UnionNode",
" * (RCL3:ResultColumn_3<Decimal>)",
" *",
" * At code generation the ProjectRestrictNodes will again be removed and the ",
" * execution tree will end up looking like this:",
" * ",
" * InsertResultSet",
" * (BigInt)",
" * |",
" * NormalizeResultSet",
" * (BigInt)",
" * |",
" * SortResultSet",
" * (Decimal)",
" * |",
" * UnionResultSet",
" * (Decimal)",
" *",
" * where SortResultSet is generated to enforce the DistinctNode, ",
" * and thus expects the DistinctNode's column type--i.e. Decimal.",
" * ",
" * When it comes time to execute the INSERT statement, then, the UnionResultSet ",
" * will create a row having a column whose type is DECIMAL, i.e. an SQLDecimal ",
" * value. The UnionResultSet will then pass that up to the SortResultSet, ",
" * who is *also* expecting an SQLDecimal value. So the SortResultSet is ",
" * satisfied and can sort all of the rows from the UnionResultSet. ",
" * Then those rows are passed up the tree to the NormalizeResultSet, ",
" * which takes the DECIMAL value from its child (SortResultSet) and normalizes ",
" * it to a value having its own type--i.e. to a BIGINT. The BIGINT is then ",
" * passed up to InsertResultSet, which inserts it into the BIGINT column ",
" * of the target table. And so the INSERT statement succeeds.",
" * ",
" * ---- Sample 2 - NormalizeResultSetNode and Union (DERBY-3494 write-up variation)",
" * Query for discussion",
" * ",
" *",
" * create table t1 (bi bigint, i int);",
" * insert into t1 values (100, 10), (288, 28), (4820, 2);",
" *",
" * select * from",
" * (select bi, i from t1 union select i, bi from t1) u(a,b) where a > 28;",
" *",
" *",
" * Some things to notice about this query:",
" * a) The UNION is part of a subquery.",
" * b) This is *not* a UNION ALL; i.e. we need to eliminate duplicate rows.",
" * c) The left side of the UNION and the right side of the UNION have ",
" * different (but compatible) types: the left has (BIGINT, INT), while the ",
" * right has (INT, BIGINT).",
" * d) There is a predicate in the WHERE clause which references a column ",
" * from the UNION subquery.",
" * e) The table T1 has at least one row.",
" * All of these factors plays a role in the handling of the query and are ",
" * relevant to this discussion.",
" * ",
" * Building the NormalizeResultSetNode. ",
" * When compiling a query, the final stage of optimization in Derby is the ",
" * \"modification of access paths\" phase, in which each node in the query ",
" * tree is given a chance to modify or otherwise perform maintenance in ",
" * preparation for code generation. In the case of a UnionNode, a call ",
" * to modifyAccessPaths() will bring us to the addNewNodes() method, ",
" * which is where the call is made to generate the NormalizeResultSetNode.",
" * ",
" *",
" * if (! columnTypesAndLengthsMatch())",
" * {",
" * treeTop = ",
" * (NormalizeResultSetNode) getNodeFactory().getNode(",
" * C_NodeTypes.NORMALIZE_RESULT_SET_NODE,",
" * treeTop, null, null, Boolean.FALSE,",
" * getContextManager()); ",
" * }",
" *",
" * The fact that the left and right children of the UnionNode have different ",
" * types (observation c above) means that the if condition will return ",
" * true and thus we will generate a NormalizeResultSetNode above the ",
" * UnionNode. At this point (before the NormalizeResultSetNode has been ",
" * generated) our (simplified) query tree looks something like the following.",
" * PRN stands for ProjectRestrictNode, RCL stands for ResultColumnList:",
" *",
" * PRN0",
" * (RCL0)",
" * (restriction: a > 28 {RCL1})",
" * |",
" * UnionNode // <-- Modifying access paths...",
" * (RCL1)",
" * / \\",
" * PRN2 PRN3",
" * | |",
" * PRN4 PRN5",
" * | |",
" * T1 T1",
" *",
" *",
" * where 'a > 28 {RCL1}' means that the column reference A in the predicate a > 28 points to a ResultColumn object in the ResultColumnList that corresponds to \"RCL1\". I.e. at this point, the predicate's column reference is pointing to an object in the UnionNode's RCL.",
" * \"normalize node creation\" will execute:",
" *",
" * ResultColumnList prRCList = rcl;",
" * rsn.setResultColumns(rcl.copyListAndObjects());",
" * // Remove any columns that were generated.",
" * prRCList.removeGeneratedGroupingColumns();",
" * ...",
" * prRCList.genVirtualColumnNodes(rsn, rsn.getResultColumns());",
" * ",
" * this.resultColumns = prRCList;",
" * ",
" * to create a NormalizeResultSetNode whose result column list is prRCList. ",
" * This gives us:",
" *",
" * PRN0",
" * (RCL0)",
" * (restriction: a > 28 {RCL1})",
" * |",
" * NormalizeResultSetNode",
" * (RCL1) // RCL1 \"pulled up\" to NRSN",
" * |",
" * UnionNode",
" * (RCL2) // RCL2 is a (modified) *copy* of RCL1",
" * / \\",
" * PRN2 PRN3",
" * | |",
" * PRN4 PRN5",
" * | |",
" * T1 T1",
" *",
" * Note how RCL1, the ResultColumnList object for the UnionNode, has now been ",
" * *MOVED* so that it belongs to the NormalizeResultSetNode. So the predicate ",
" * a > 28, which (still) points to RCL1, is now pointing to the ",
" * NormalizeResultSetNode instead of to the UnionNode.",
" * ",
" * After this, we go back to UnionNode.addNewNodes() where we see the following:",
" * ",
" *",
" * treeTop = (ResultSetNode) getNodeFactory().getNode(",
" * C_NodeTypes.DISTINCT_NODE,",
" * treeTop.genProjectRestrict(),",
" * Boolean.FALSE,",
" * tableProperties,",
" * getContextManager());",
" *",
" *",
" * I.e. we have to generate a DistinctNode to eliminate duplicates because the query ",
" * specified UNION, not UNION ALL.",
" * ",
" * Note the call to treeTop.genProjectRestrict(). Since NormalizeResultSetNode ",
" * now sits on top of the UnionNode, treeTop is a reference to the ",
" * NormalizeResultSetNode. That means we end up at the genProjectRestrict() ",
" * method of NormalizeResultSetNode. And guess what? The method does ",
" * something very similar to what we did in NormalizeResultSetNode.init(), ",
" * namely:",
" *",
" * ResultColumnList prRCList = resultColumns;",
" * resultColumns = resultColumns.copyListAndObjects();",
" *",
" * and then creates a ProjectRestrictNode whose result column list is prRCList. This gives us:",
" *",
" * PRN0",
" * (RCL0)",
" * (restriction: a > 28 {RCL1})",
" * |",
" * PRN6",
" * (RCL1) // RCL1 \"pulled up\" to new PRN.",
" * |",
" * NormalizeResultSetNode",
" * (RCL3) // RCL3 is a (modified) copy of RCL1",
" * |",
" * UnionNode",
" * (RCL2) // RCL2 is a (modified) copy of RCL1",
" * / \\",
" * PRN2 PRN3",
" * | |",
" * PRN4 PRN5",
" * | |",
" * T1 T1",
" *",
" * On top of that we then put a DistinctNode. And since the init() method ",
" * of DistinctNode does the same kind of thing as the previously-discussed ",
" * methods, we ultimatley end up with:",
" *",
" * PRN0",
" * (RCL0)",
" * (restriction: a > 28 {RCL1})",
" * |",
" * DistinctNode",
" * (RCL1) // RCL1 pulled up to DistinctNode",
" * |",
" * PRN6",
" * (RCL4) // RCL4 is a (modified) copy of RCL1",
" * |",
" * NormalizeResultSetNode",
" * (RCL3) // RCL3 is a (modified) copy of RCL1",
" * |",
" * UnionNode",
" * (RCL2) // RCL2 is a (modified) copy of RCL1",
" * / \\",
" * PRN2 PRN3",
" * | |",
" * PRN4 PRN5",
" * | |",
" * T1 T1",
" *",
" * And thus the predicate a > 28, which (still) points to RCL1, is now ",
" * pointing to the DistinctNode instead of to the UnionNode. And this ",
" * is what we want: i.e. we want the predicate a > 28 to be applied ",
" * to the rows that we retrieve from the node at the *top* of the ",
" * subtree generated for the UnionNode. It is the non-intuitive code ",
" * in the normalize node creation that allows this to happen.",
" *"
],
"header": "@@ -53,8 +53,491 @@ import org.apache.derby.iapi.services.classfile.VMOpcode;",
"removed": [
" * child result set that needs one."
]
}
]
}
] |
derby-DERBY-3311-d6209a80
|
DERBY-3311: Client ResultSet.getHoldabilty will return incorrect value when the ResultSet is obtained from a procedure call
Patch contributed by Daniel John Debrunner
Patch file: derby_3311_diff.txt
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@613169 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.jdbc.EngineResultSet;"
],
"header": "@@ -61,6 +61,7 @@ import org.apache.derby.iapi.services.sanity.SanityManager;",
"removed": []
},
{
"added": [
"\t\t\twriteSQLDHROW(((EngineResultSet) rs).getHoldability());"
],
"header": "@@ -4127,7 +4128,7 @@ class DRDAConnThread extends Thread {",
"removed": [
"\t\t\twriteSQLDHROW (stmt);"
]
},
{
"added": [
"\t\t\twriteSQLDHROW(ps.getResultSetHoldability());"
],
"header": "@@ -6250,7 +6251,7 @@ class DRDAConnThread extends Thread {",
"removed": [
"\t\t\twriteSQLDHROW (stmt);"
]
},
{
"added": [
"",
" /**",
" * Holdability passed in as it can represent the holdability of",
" * the statement or a specific result set.",
" * @param holdability HOLD_CURSORS_OVER_COMMIT or CLOSE_CURSORS_AT_COMMIT",
" * @throws DRDAProtocolException",
" * @throws SQLException",
" */",
"\tprivate void writeSQLDHROW(int holdability) throws DRDAProtocolException,SQLException"
],
"header": "@@ -6460,9 +6461,15 @@ class DRDAConnThread extends Thread {",
"removed": [
"\t//pass PreparedStatement here so we can send correct holdability on the wire for jdk1.3 and higher",
"\t//For jdk1.3, we provide hold cursor support through reflection.",
"\tprivate void writeSQLDHROW (DRDAStatement stmt) throws DRDAProtocolException,SQLException"
]
}
]
},
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAStatement.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.jdbc.EngineResultSet;"
],
"header": "@@ -43,6 +43,7 @@ import org.apache.derby.iapi.jdbc.BrokeredConnection;",
"removed": []
},
{
"added": [],
"header": "@@ -408,38 +409,6 @@ class DRDAStatement",
"removed": [
"\t/**",
"\t *",
"\t * get resultSetHoldability.",
"\t * ",
"\t * @return the resultSet holdability for the prepared statement",
"\t *",
"\t */",
"\tprotected int getResultSetHoldability() throws SQLException",
"\t{",
"\t\treturn getResultSetHoldability(getResultSet());",
"\t}",
"\t",
"\t/**",
"\t *",
"\t * get resultSetHoldability.",
"\t * ",
"\t * @param rs ResultSet ",
"\t * @return the resultSet holdability for the prepared statement",
"\t *",
"\t */",
"\tint getResultSetHoldability(ResultSet rs) throws SQLException",
"\t{",
"\t\tStatement rsstmt;",
"",
"\t\tif (rs != null)",
"\t\t\trsstmt = rs.getStatement();",
"\t\telse",
"\t\t\trsstmt = getPreparedStatement();",
" ",
" return rsstmt.getResultSetHoldability();",
"\t}\t",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/jdbc/EngineResultSet.java",
"hunks": [
{
"added": [
" /**",
" * Fetch the holdability of this ResultSet which may be different",
" * from the holdability of its Statement.",
" * @return HOLD_CURSORS_OVER_COMMIT or CLOSE_CURSORS_AT_COMMIT",
" * @throws SQLException Error.",
" */",
" public int getHoldability() throws SQLException;",
" "
],
"header": "@@ -57,4 +57,12 @@ public interface EngineResultSet extends ResultSet {",
"removed": []
}
]
}
] |
derby-DERBY-3316-3abb5722
|
DERBY-3316 Leak in client if ResultSet not closed
The leak was in SectionManager.positionedUpdateCursorNameToResultSet_ which kept a reference to the ResultSet so it wouldn't get garbage collected. The solution was to use a WeakReference to the ResultSets in positionedUpdateCursorNameToResultSet_ so that they can be garbage collected.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@612262 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/SectionManager.java",
"hunks": [
{
"added": [
"import java.lang.ref.WeakReference;",
""
],
"header": "@@ -21,6 +21,8 @@",
"removed": []
}
]
}
] |
derby-DERBY-3319-60d10e68
|
DERBY-1191 (partial) Some SQLExceptions, for example those generated from BrokeredStatements, do not print to derby.log even when derby.stream.error.logSeverityLevel=0
Here is a patch that takes the approach of adding a public static void logAndThrowSQLException(SQLException se) method and then calling that instead of just throwing the exception.
The initial patch only uses the method for EmbedConnection.checkForTransactionInProgress() which is the most important exception to log after the fix for DERBY-3319.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@803948 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/Util.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.error.ErrorStringBuilder;",
"import org.apache.derby.iapi.services.property.PropertyUtil;",
"import org.apache.derby.iapi.services.stream.HeaderPrintWriter;",
"import org.apache.derby.iapi.services.monitor.Monitor;",
"import org.apache.derby.iapi.reference.Property;"
],
"header": "@@ -21,15 +21,20 @@",
"removed": []
},
{
"added": [
"",
"\tprivate static int logSeverityLevel = PropertyUtil.getSystemInt(Property.LOG_SEVERITY_LEVEL,",
"\t\tSanityManager.DEBUG ? 0 : ExceptionSeverity.SESSION_SEVERITY);"
],
"header": "@@ -68,6 +73,9 @@ public abstract class Util {",
"removed": []
},
{
"added": [
" * Log SQLException to the error log if the severity exceeds the ",
" * logSeverityLevel and then throw it. This method can be used for ",
" * logging JDBC exceptions to derby.log DERBY-1191.",
" * ",
" * @param se SQLException to log and throw",
" * @throws SQLException",
" */",
" public static void logAndThrowSQLException(SQLException se) throws SQLException {",
" \tif (se.getErrorCode() >= logSeverityLevel){",
" \tlogSQLException(se);",
" \t}",
" \tthrow se;",
" }",
" ",
"\t/**",
"\t * Log an SQLException to the error log or to the console if there is no",
"\t * error log available.",
"\t * This method could perhaps be optimized to have a static shared",
"\t * ErrorStringBuilder and synchronize the method, but this works for now.",
"\t * ",
"\t * @param se SQLException to log",
"\t */",
"\tprivate static void logSQLException(SQLException se) {",
" \tif (se == null)",
" \t\treturn;",
" \tString message = se.getMessage();",
" \tString sqlstate = se.getSQLState();",
" \tif ((sqlstate != null) && (sqlstate.equals(SQLState.LOGIN_FAILED)) && ",
" \t\t\t(message != null) && (message.equals(\"Connection refused : java.lang.OutOfMemoryError\")))\t\t\t\t",
" \t\treturn;",
"",
" \tHeaderPrintWriter errorStream = Monitor.getStream();",
" \tif (errorStream == null) {",
" \t\tse.printStackTrace();",
" \t\treturn;",
" \t}",
" \tErrorStringBuilder\terrorStringBuilder = new ErrorStringBuilder(errorStream.getHeader());",
" \terrorStringBuilder.append(\"\\nERROR \" + se.getSQLState() + \": \" + se.getMessage() + \"\\n\");",
" \terrorStringBuilder.stackTrace(se);",
" \terrorStream.print(errorStringBuilder.get().toString());",
" \terrorStream.flush();",
" \terrorStringBuilder.reset();",
"",
" }",
"",
"\t",
"\t/**"
],
"header": "@@ -75,6 +83,53 @@ public abstract class Util {",
"removed": []
}
]
}
] |
derby-DERBY-3319-902041a6
|
DERBY-3319: Logical connections do not check if a transaction is active on close
Make sure that connections created by the different kinds of data
sources throw an exception on close if they are active. This is done
to get the same behaviour as with connections returned by
DriverManager.
Don't throw exception for connections with auto-commit on (since
they'll auto-commit the transaction as part of the close, and
therefore won't leave uncommitted operations around) or connections
that are part of an XA transaction (since those transactions can still
be committed/aborted via the associated XAResource after the closing
of the connection).
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@675870 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/jdbc/EmbedPooledConnection.java",
"hunks": [
{
"added": [],
"header": "@@ -153,11 +153,6 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne",
"removed": [
"\t\t// need to do this in case the connection is forcibly removed without",
"\t\t// first being closed.",
"\t\tcloseCurrentConnectionHandle();",
"",
""
]
},
{
"added": [
" // Need to do this in case the connection is forcibly removed without",
" // first being closed. Must be performed after resetRealConnection(),",
" // otherwise closing the logical connection may fail if the transaction",
" // is not idle.",
" closeCurrentConnectionHandle();",
""
],
"header": "@@ -171,6 +166,12 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/jdbc/EmbedXAConnection.java",
"hunks": [
{
"added": [
" /**",
" * Check if this connection is part of a global XA transaction.",
" *",
" * @return {@code true} if the transaction is global, {@code false} if the",
" * transaction is local",
" */",
" private boolean isGlobal() {",
" return xaRes.getCurrentXid () != null;",
" }",
""
],
"header": "@@ -53,6 +53,16 @@ class EmbedXAConnection extends EmbedPooledConnection",
"removed": []
},
{
"added": [
"\t\tif (autoCommit && isGlobal())"
],
"header": "@@ -69,7 +79,7 @@ class EmbedXAConnection extends EmbedPooledConnection",
"removed": [
"\t\tif (autoCommit && (xaRes.getCurrentXid () != null))"
]
},
{
"added": [
"\t\t\tif (isGlobal()) {"
],
"header": "@@ -86,7 +96,7 @@ class EmbedXAConnection extends EmbedPooledConnection",
"removed": [
"\t\t\tif (xaRes.getCurrentXid () != null) {"
]
},
{
"added": [
"\t\tif (isGlobal())"
],
"header": "@@ -102,7 +112,7 @@ class EmbedXAConnection extends EmbedPooledConnection",
"removed": [
"\t\tif (xaRes.getCurrentXid () != null)"
]
},
{
"added": [
"\t\tif (isGlobal())"
],
"header": "@@ -113,7 +123,7 @@ class EmbedXAConnection extends EmbedPooledConnection",
"removed": [
"\t\tif (xaRes.getCurrentXid () != null)"
]
}
]
}
] |
derby-DERBY-3320-6b95f615
|
DERBY-3320 This commit will ensure that if the Collator support does not exist during
a territory based database creation, then we will throw an exception saying Collator
support does not exist. In case of subsequent boot of a successfully created territory
based database, we will check for the Collator support from JVM at the time first collation
related operation is run on the database. This can happen if the database recovery needs
to be done at the boot time or it can happen after the database has booted and user has
issued a SQL which requires Collator support.
The details of the changes that went into different classes are as follows.
DataDictionaryImpl
With this patch, the collation attribute on JDBC url now gets verified in DVF.boot method
and hence we do not need to verify the value again in DataDictionaryImpl.boot. We can assume
that if the user has provided the collation attribute, then it will be set to UCS_BASIC or
TERRITORY_BASED. If user did not supply this attribute, then we will use the default
territory of UCS_BASIC
BasicDatabase
The boot method here would call DVF.setLocale in order to set the correct locale and
Collator object on DVF. With this patch, DVF.boot method does the setting of locale
and Collator object and hence BasicDatabase.boot does not need to make a special call
on DVF.setLocale. I have removed setLocale method from DVF because there are no other
callers of the setLocale method.
DVF and DVFImpl
The boot method here has been modified so that it will set the locale on itself rather than
rely on BasicDatabase.boot to do it. In addition, it will it will check if the user has
supplied collation attribute on JDBC url at database create time. If none specified, we will
user the default of UCS_BASIC. If user has provided a value for collation attribute, then
this method will verify that it is set to UCS_BASIC or TERRITORY_BASED. If not, it will
throw an exception. During database create time, boot method will make sure that the
Collator support is available for the requested locale for TERRITORY_BASED database. If not,
it will throw an exception. In case of subsequent database boot, the verification for the
Collator support will be done the first time a collation operation is executed on the DB.
This can happen at database recovery time if the database has to be recovered or it can
happen later when user issues a SQL which required a collation operation. This collation
support check is done in the new method called verifyCollatorSupport().
In addition to changes in the boot method, I had to change the signature of existing apis
like getNullChar, getNullVarchar etc to throw StandardException. These methods can throw
an exception if they are the first ones to require collation support and during the
collation support check, we can throw exception if the Collator support from JVM is not
found.
messages.xml and SQLState.java
Added a new SQL State XBM04 for the case when the requested locale for territory based
database does not have Collator support from the jvm.
CollationTest
Added a test case which tries to use a non-existent locale to create a territory based
database. The creation fails as expected and we throw exception that the Collator support
does not exist for the requested locale.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@643292 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/DataValueFactory.java",
"hunks": [
{
"added": [],
"header": "@@ -31,8 +31,6 @@ import java.sql.Timestamp;",
"removed": [
"import java.util.Locale;",
""
]
},
{
"added": [
" int collationType)",
" throws StandardException;"
],
"header": "@@ -607,7 +605,8 @@ public interface DataValueFactory",
"removed": [
" int collationType);"
]
},
{
"added": [
" int collationType)",
" throws StandardException;"
],
"header": "@@ -623,7 +622,8 @@ public interface DataValueFactory",
"removed": [
" int collationType);"
]
},
{
"added": [
" int collationType)",
" throws StandardException;"
],
"header": "@@ -639,7 +639,8 @@ public interface DataValueFactory",
"removed": [
" int collationType);"
]
},
{
"added": [
" int collationType)",
" throws StandardException;"
],
"header": "@@ -655,7 +656,8 @@ public interface DataValueFactory",
"removed": [
" int collationType);"
]
},
{
"added": [
" * If this is the first time Collator is being requested for a",
" * database with collation type of TERRITORY_BASED, then we will check ",
" * to make sure that JVM supports the Collator for the database's ",
" * locale. If not, we will throw an exception "
],
"header": "@@ -702,27 +704,16 @@ public interface DataValueFactory",
"removed": [
"",
" /**",
" * Set the locale on DVF. This method gets called by the boot method of",
" * BasicDatabase after BasicDatabase has finished booting DVF. This ",
" * Locale will be either the Locale obtained from the territory ",
" * attribute supplied by the user on the JDBC url at database create ",
" * time or if user didn't provide the territory attribute at database",
" * create time, then it will be set to the default JVM locale. The ",
" * Locale object will be used to construct the Collator object if user ",
" * has requested territory based collation.",
" *",
" * @param localeOfTheDatabase Use this object to construct the ",
" * Collator object",
" */",
" void setLocale(Locale localeOfTheDatabase);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/types/DataValueFactoryImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.monitor.ModuleFactory;",
"import org.apache.derby.iapi.reference.Attribute;",
"import org.apache.derby.iapi.reference.Property;"
],
"header": "@@ -44,11 +44,14 @@ import org.apache.derby.iapi.services.io.FormatIdUtil;",
"removed": []
},
{
"added": [
" \t\t \t\t",
" \t\tNumberDataType.ZERO_DECIMAL = decimalImplementation; \t\t",
"",
" \t\tModuleFactory monitor = Monitor.getMonitor();",
" \t\t//The Locale on monitor has already been set by the boot code in",
" \t\t//BasicDatabase so we can simply do a get here.",
" \t\t//This Locale will be either the Locale obtained from the territory",
" \t\t//attribute supplied by the user on the JDBC url at database create",
" \t\t//time or if user didn't provide the territory attribute at database",
" \t\t//create time, then it will be set to the default JVM locale. The",
" \t\t//Locale object will be used to construct the Collator object which",
" \t\t//will be used if user has requested territory based collation.",
" \t\tdatabaseLocale = monitor.getLocale(this);",
"",
" \t\t//If we are here for database create time, verify that there is ",
" \t\t//Collator support for the database's locale. If not, then we ",
" \t\t//will throw an exception. ",
" \t\t//Notice that this Collator support check is happening only during ",
" \t\t//database creation time. This is because, during database create",
" \t\t//time, DVF has access to collation property of the database and",
" \t\t//hence it can do the Collator support check",
" \t\t//(collation property is available through JDBC url at the database",
" \t\t//create time, if user has asked for a particular collation) eg",
" \t\t//connect 'jdbc:derby:db;create=true;territory=no;collation=TERRITORY_BASED';",
" \t\t//Once the database is created, the collation property gets",
" \t\t//saved in the database and during susbsequent boots of the",
" \t\t//database, collation attribute of the database is only available",
" \t\t//once store has finished reading it. So, during subsequent ",
" \t\t//database boot up time, the collation attribute of the database ",
" \t\t//will be checked the first time a collation operation is done.",
" \t\t//And if the Collator support is not found at that point, user will ",
" \t\t//get an exception for Collator unavailability. This first ",
" \t\t//collation operation can happen if the database needs to be ",
" \t\t//recovered during boot time or otherwise it will happen when the",
" \t\t//user has executed a SQL which requires collation operation.",
"\t \tif (create) {",
"\t \t\t//Get the collation property from the JDBC url(this will be ",
"\t \t\t//available only during database create time). It can only have ",
"\t \t\t//one of the 2 possible values - UCS_BASIC or TERRITORY_BASED.",
"\t \t\t//This property can only be specified at database create time.",
"\t \t\t//If the user has requested for territory based database, then ",
"\t \t\t//verify that JVM has Collator support for the database locale.",
"\t \t\tString userDefinedCollation = ",
"\t \t\t\tproperties.getProperty(Attribute.COLLATION);\t\t",
"\t \t\tif (userDefinedCollation != null) {//Invalid value handling",
"\t \t\t\tif (!userDefinedCollation.equalsIgnoreCase(Property.UCS_BASIC_COLLATION)",
"\t \t\t\t\t\t&& !userDefinedCollation.equalsIgnoreCase(Property.TERRITORY_BASED_COLLATION))",
"\t \t\t\t\tthrow StandardException.newException(SQLState.INVALID_COLLATION, userDefinedCollation);",
"\t \t\t\tif (userDefinedCollation.equalsIgnoreCase(Property.TERRITORY_BASED_COLLATION))",
"\t \t\t\t\tcollatorForCharacterTypes = verifyCollatorSupport();",
"\t \t\t} \t\t",
"\t \t}"
],
"header": "@@ -98,15 +101,61 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl",
"removed": [
" \t\t",
" \t\t",
" \t\tNumberDataType.ZERO_DECIMAL = decimalImplementation;",
" \t\t",
" \t\t",
" \t\t"
]
},
{
"added": [
" throws StandardException"
],
"header": "@@ -813,6 +862,7 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl",
"removed": []
},
{
"added": [
" throws StandardException"
],
"header": "@@ -844,6 +894,7 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl",
"removed": []
},
{
"added": [
" throws StandardException"
],
"header": "@@ -875,6 +926,7 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl",
"removed": []
},
{
"added": [
" throws StandardException"
],
"header": "@@ -906,6 +958,7 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl",
"removed": []
}
]
}
] |
derby-DERBY-3320-ececdabc
|
Fix for the test that was added for DERBY-3320. The test was using DriverManager.getConnection
but DriverManager is not available in JSR 169 environment. This caused the test to fail
when run in JSR 169 environment. I have fixed the test to use testUtil.getConnection
which is smart enough to use DriverManager or DataSource depending on what VM the test
is getting run in.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@645665 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3321-16164d57
|
DERBY-3321: NullPointerException for 'NOT EXISTS' with nested subquery
Checks fromList for a FromSubquery using a CollectNodesVistior.
The value of bindTargetListOnly is based on the contents of the visitor.
Patch contributed by Thomas Nielsen
Patch file: d3321.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634316 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3327-99d7d8b3
|
DERBY-3327 SQL roles: Implement authorization stack
Patch DERBY-3327-3 which implements the authorization stack for SQL roles.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@614071 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/conn/LanguageConnectionContext.java",
"hunks": [
{
"added": [],
"header": "@@ -423,22 +423,6 @@ public interface LanguageConnectionContext extends Context {",
"removed": [
"\t/**",
"\t *\tGet the current role authorization identifier",
"\t *",
"\t * @return String\tthe role id",
"\t */",
"\tpublic String getCurrentRoleId();",
"",
"",
" /**",
"\t * Set the current role",
"\t *",
"\t * @param rd\tthe descriptor of the role to be set to current",
"\t */",
" public void setCurrentRole(RoleDescriptor rd);",
"",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/GenericActivationHolder.java",
"hunks": [
{
"added": [
"\tpublic void setNestedCurrentRole(String role) {",
"\t\tac.setNestedCurrentRole(role);",
" }",
"",
" public String getNestedCurrentRole() {",
"\t\treturn ac.getNestedCurrentRole();",
" }",
"",
"\tpublic void setCallActivation(Activation a) {",
"\t\tac.setCallActivation(a);",
"\t}",
"",
"\tpublic Activation getCallActivation() {",
"\t\treturn ac.getCallActivation();",
"\t}",
"",
""
],
"header": "@@ -564,6 +564,23 @@ final class GenericActivationHolder implements Activation",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java",
"hunks": [
{
"added": [
"\t/**",
"\t * 'callers' keeps track of which, if any, stored procedure",
"\t * activations are active. This helps implement the \"authorization",
"\t * stack\" of SQL 2003, vol 2, section 4.34.1.1 and 4.27.3.",
"\t *",
"\t * For the top level, the current role is kept here,",
"\t * cf. 'currentRole'. For dynamic call contexts, the current role",
"\t * is kept in the activation of the calling statement,",
"\t * cf. 'getCurrentRoleId'.",
"\t */",
"\tprivate ArrayList callers = new ArrayList(); // used as a stack only",
""
],
"header": "@@ -163,6 +163,18 @@ public class GenericLanguageConnectionContext",
"removed": []
},
{
"added": [
"\tprotected String currentRole;"
],
"header": "@@ -185,7 +197,7 @@ public class GenericLanguageConnectionContext",
"removed": [
"\tprotected RoleDescriptor currentRole;"
]
},
{
"added": [],
"header": "@@ -1769,27 +1781,6 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t/**",
"\t * Get the current role authorization identifier",
"\t *",
"\t * @return String\tthe role id",
"\t */",
"\tpublic String getCurrentRoleId() {",
"\t\treturn currentRole != null ?",
"\t\t\tcurrentRole.getRoleName() : null;",
"\t}",
"",
"",
"\t/**",
"\t * Set the current role",
"\t *",
"\t * @param rd\tthe descriptor of the role to be set to current",
"\t */",
"\tpublic void setCurrentRole(RoleDescriptor rd) {",
"\t\tthis.currentRole = rd;",
"\t}",
"",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/BaseActivation.java",
"hunks": [
{
"added": [
"\t// Authorization stack frame, cf. SQL 2003 4.31.1.1 and 4.27.3 is",
"\t// implemented as follows: Statements at root connection level",
"\t// (not executed within a stored procedure), maintain the current",
"\t// role in the lcc. In this case, 'callActivation' is null. If",
"\t// we are executing SQL inside a stored procedure (nested",
"\t// connection), then 'callActivation' will be non-null, and we",
"\t// maintain the current role in the activation of the calling",
"\t// statement, see 'setNestedCurrentRole'. The current role of a call",
"\t// context is kept in the field 'nestedCurrentRole'.",
"\t//",
"\t// 'callActivation' is set when activation is created (see",
"\t// GenericPreparedStatement#getActivation based on the top of the",
"\t// dynamic call stack of activation, see",
"\t// GenericLanguageConnectionContext#getCaller.",
"\t//",
"\t// Corner case: When a dynamic result set references current role,",
"\t// the value retrieved will always be that of the current role",
"\t// when the statement is executed (inside), not the current value",
"\t// when the result set is accessed outside the stored procedure.",
"\t//",
"\t// Consequence of this implementation: If more than one nested",
"\t// connection is used inside a shared procedure, they will share",
"\t// the current role setting. Since the same dynamic call context",
"\t// is involved, this seems correct.",
"\t//",
"\tprivate Activation callActivation;",
"\tprivate String nestedCurrentRole;",
""
],
"header": "@@ -140,6 +140,34 @@ public abstract class BaseActivation implements CursorActivation, GeneratedByteC",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/CallStatementResultSet.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;"
],
"header": "@@ -28,6 +28,7 @@ import org.apache.derby.iapi.error.StandardException;",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/SetRoleConstantAction.java",
"hunks": [
{
"added": [
" if (rd == null) {",
" // or if not, via PUBLIC?",
" rd = dd.getRoleGrantDescriptor",
" (thisRoleName,",
" Authorizer.PUBLIC_AUTHORIZATION_ID,",
" dbo);",
"",
" // Nope, we can't set this role, so throw.",
" if (rd == null) {",
" throw StandardException.newException",
" (SQLState. ROLE_INVALID_SPECIFICATION_NOT_GRANTED,",
" thisRoleName);",
" }"
],
"header": "@@ -135,15 +135,19 @@ class SetRoleConstantAction implements ConstantAction",
"removed": [
" if (rd == null &&",
" (dd.getRoleGrantDescriptor",
" (thisRoleName,",
" Authorizer.PUBLIC_AUTHORIZATION_ID,",
" dbo) == null)) {",
"",
" throw StandardException.newException",
" (SQLState.ROLE_INVALID_SPECIFICATION_NOT_GRANTED,",
" thisRoleName);"
]
}
]
}
] |
derby-DERBY-3327-f04dc447
|
DERBY-3327 SQL roles: Implement authorization stack
Some javadoc bugs fixed.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@615109 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/Activation.java",
"hunks": [
{
"added": [
"\t * @param role The name of the current role"
],
"header": "@@ -591,7 +591,7 @@ public interface Activation",
"removed": [
"\t * @arg role The name of the current role"
]
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/sql/conn/LanguageConnectionContext.java",
"hunks": [
{
"added": [
"\t * @param a activation of set role statement"
],
"header": "@@ -1052,7 +1052,7 @@ public interface LanguageConnectionContext extends Context {",
"removed": [
"\t * @param activation activation of set role statement"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java",
"hunks": [
{
"added": [
"\t * @param a activation of set role statement"
],
"header": "@@ -3107,7 +3107,7 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t * @param activation activation of set role statement"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/BaseActivation.java",
"hunks": [
{
"added": [
"\t * @param role The name of the current role"
],
"header": "@@ -1344,7 +1344,7 @@ public abstract class BaseActivation implements CursorActivation, GeneratedByteC",
"removed": [
"\t * @arg role The name of the current role"
]
},
{
"added": [
"\t * @param a The caller's activation"
],
"header": "@@ -1365,7 +1365,7 @@ public abstract class BaseActivation implements CursorActivation, GeneratedByteC",
"removed": [
"\t * @arg a The caller's activation"
]
}
]
}
] |
derby-DERBY-3330-0efe521c
|
DERBY-3330
submitted by Anurag Shekhar
This commit is of the derby-3330_followup_1_modified.diff.
CreateIndexConstantAction.java
I have changed the name of the variable and added comment to explain what its meant for.
I got confused while commenting last time. This property is required if
TransactionCoordinator has to select the non default Sorter.
BTreeController.java
I haven't optimized it yet. But instead of returning error code setting the ret value so the latch
is cleared before return.
IndexDescriptorImpl.java
Updated both the place (using new name in toString () and using the new attribute in equals methods).
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634540 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/catalog/types/IndexDescriptorImpl.java",
"hunks": [
{
"added": [
"\t\t\tsb.append (\"UNIQUE WITH DUPLICATE NULLS\");"
],
"header": "@@ -216,7 +216,7 @@ public class IndexDescriptorImpl implements IndexDescriptor, Formatable",
"removed": [
"\t\t\tsb.append (\"ALMOST UNIQUE\");"
]
},
{
"added": [
"\t\t\tif ((id.isUnique == this.isUnique) &&",
" (id.isUniqueWithDuplicateNulls == ",
" this.isUniqueWithDuplicateNulls) &&",
" (id.baseColumnPositions.length ==",
" this.baseColumnPositions.length) &&",
" (id.numberOfOrderedColumns == ",
" this.numberOfOrderedColumns) &&",
" (id.indexType.equals(this.indexType)))"
],
"header": "@@ -318,12 +318,14 @@ public class IndexDescriptorImpl implements IndexDescriptor, Formatable",
"removed": [
"\t\t\tif ((id.isUnique == this.isUnique) &&",
"\t\t\t\t(id.isUnique == this.isUnique) &&",
"\t\t\t\t(id.baseColumnPositions.length ==",
"\t\t\t\t\t\t\t\t\t\tthis.baseColumnPositions.length) &&",
"\t\t\t\t(id.numberOfOrderedColumns == this.numberOfOrderedColumns) &&",
"\t\t\t\t\t(id.indexType.equals(this.indexType)))"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/CreateIndexConstantAction.java",
"hunks": [
{
"added": [
"\t\t\tint numColumnOrderings;",
"\t\t\tSortObserver sortObserver = null;",
" Properties sortProperties = null;",
"\t\t\t\t// if the index is a constraint, use constraintname in ",
" // possible error message",
"\t\t\t\t\tConglomerateDescriptor cd = ",
" dd.getConglomerateDescriptor(conglomerateUUID);",
"\t\t\t\t\tif ((isConstraint) && ",
" (cd != null && cd.getUUID() != null && td != null))",
"\t\t\t\t\t\tConstraintDescriptor conDesc = ",
" dd.getConstraintDescriptor(td, cd.getUUID());"
],
"header": "@@ -727,19 +727,23 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\t\tint numColumnOrderings;",
"\t\t\tSortObserver sortObserver = null;",
"\t\t\t\t// if the index is a constraint, use constraintname in possible error messagge",
"\t\t\t\t\tConglomerateDescriptor cd = dd.getConglomerateDescriptor(conglomerateUUID);",
"\t\t\t\t\tif ((isConstraint) && (cd != null && cd.getUUID() != null && td != null))",
"\t\t\t\t\t\tConstraintDescriptor conDesc = dd.getConstraintDescriptor(td,",
" cd.getUUID());"
]
},
{
"added": [
" // tell transaction controller to use the unique with ",
" // duplicate nulls sorter, when making createSort() call.",
"\t\t\t\t\tsortProperties = new Properties();",
"\t\t\t\t\tsortProperties.put("
],
"header": "@@ -763,8 +767,10 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
"\t\t\t\t\tproperties = new Properties();",
"\t\t\t\t\tproperties.put("
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/access/btree/BTreeController.java",
"hunks": [
{
"added": [
" if (getConglomerate().isUniqueWithDuplicateNulls()) ",
" {",
" if (ret == MATCH_FOUND) ",
" {",
" ret_val = ConglomerateController.ROWISDUPLICATE;",
" break;",
" }"
],
"header": "@@ -807,11 +807,15 @@ public class BTreeController extends OpenBTree implements ConglomerateController",
"removed": [
" if (getConglomerate().isUniqueWithDuplicateNulls()) {",
" if (ret == MATCH_FOUND)",
" return ConglomerateController.ROWISDUPLICATE;"
]
},
{
"added": [
" if (getConglomerate().isUniqueWithDuplicateNulls()) ",
" {",
" if (ret == MATCH_FOUND) ",
" {",
" ret_val = ConglomerateController.ROWISDUPLICATE;",
" break;",
" }"
],
"header": "@@ -844,11 +848,15 @@ public class BTreeController extends OpenBTree implements ConglomerateController",
"removed": [
" if (getConglomerate().isUniqueWithDuplicateNulls()) {",
" if (ret == MATCH_FOUND)",
" return ConglomerateController.ROWISDUPLICATE;"
]
}
]
}
] |
derby-DERBY-3330-2f994958
|
DERBY-3330
Adding an additional upgrade test case to verify that behavior of unique
constraint on non-null column is consistent in all cases.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634849 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3330-435735bf
|
DERBY-3330
javadoc fixes, mostly related to DERBY-3330.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@636114 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/CreateIndexConstantAction.java",
"hunks": [
{
"added": [
" * @param uniqueWithDuplicateNulls True means index check and disallow"
],
"header": "@@ -115,7 +115,7 @@ class CreateIndexConstantAction extends IndexConstantAction",
"removed": [
" * @param isUniqueWithDuplicateNulls True means index check and disallow"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/UniqueWithDuplicateNullsIndexSortObserver.java",
"hunks": [
{
"added": [
" public UniqueWithDuplicateNullsIndexSortObserver(",
" boolean doClone, ",
" boolean isConstraint,",
" String indexOrConstraintName, ",
" ExecRow execRow,",
" boolean reuseWrappers, ",
" String tableName) {"
],
"header": "@@ -47,15 +47,17 @@ public class UniqueWithDuplicateNullsIndexSortObserver extends BasicSortObserver",
"removed": [
" * @param distinct\tIf true, toss out duplicates.",
" * \t\tOtherwise, retain them.",
" public UniqueWithDuplicateNullsIndexSortObserver(boolean doClone, boolean isConstraint,",
" String indexOrConstraintName, ExecRow execRow,",
" boolean reuseWrappers, String tableName) {"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/access/sort/UniqueWithDuplicateNullsExternalSortFactory.java",
"hunks": [
{
"added": [
"",
"// for javadoc",
"import org.apache.derby.iapi.store.access.conglomerate.MethodFactory;",
"",
"public class UniqueWithDuplicateNullsExternalSortFactory ",
" extends ExternalSortFactory ",
"{",
" private static final String IMPLEMENTATIONID = ",
" \"sort almost unique external\";",
" protected MergeSort getMergeSort() ",
" {",
" public String primaryImplementationType() ",
" {",
" public boolean supportsImplementation(String implementationId) ",
" {",
" return IMPLEMENTATIONID.equals(implementationId);"
],
"header": "@@ -20,28 +20,38 @@",
"removed": [
"public class UniqueWithDuplicateNullsExternalSortFactory extends ExternalSortFactory {",
" private static final String IMPLEMENTATIONID = \"sort almost unique external\";",
" protected MergeSort getMergeSort() {",
" public String primaryImplementationType() {",
" public boolean supportsImplementation(String implementationId) {",
" return IMPLEMENTATIONID.equals (implementationId);"
]
}
]
}
] |
derby-DERBY-3330-62af56f3
|
DERBY-3330
committing on behalf of Anurag Shekhar.
Adding upgrade tests that both confirm existing behavior of indexes and
new behavior of constraints on nullable columns.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634383 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3330-d07f526b
|
DERBY-3330
contributed by Anurag Shekhar
committing a modified version of sortercomments.diff patch. This change just
updates the comments associated with the class and routine in this file - no
code changes.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634436 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/access/sort/UniqueWithDuplicateNullsMergeSort.java",
"hunks": [
{
"added": [
" * This class extends and customizes MergeSort to support unique indexes with",
" * duplicate nulls.",
" * Compares two keys. ",
" *",
" * If all the parts of the keys are not null then the leading ",
" * (keys.length - 1) parts are compared, else if no part of the key",
" * is null then all parts of the key are compared (keys.length).",
" *",
" * This behavior is useful for implementing unique constraints where",
" * multiple null values are allowed, but uniqueness must still be ",
" * guaranteed for keys with no null values. In this case the leading",
" * parts of the key are the user key columns, while the last column",
" * is a system provided column which is guaranteed unique per base row.",
" *",
" *"
],
"header": "@@ -25,18 +25,29 @@ import org.apache.derby.iapi.error.StandardException;",
"removed": [
" * This class extends and customizes MergeSort to support almost unique index.",
" * Compares two sets of keys. If all the parts of the keys are not null",
" * keys.length - 1 part is compared other wise all the parts are compared.",
" * This methods assumes that last part is location."
]
},
{
"added": [
" // Compare the columns specified in the column ordering array.",
" //if there are any nulls in the row nonull will be false",
" //",
" //if there was no nulls in the row and we are about to ",
" //compare the last field (all fields except for the location",
" //are same), treat them as duplicate. This is used by caller",
" //to implement unique key while ignoring case of keys with",
" //null values.",
" //",
" //if at least one field was null, go ahead and compare the ",
" //location too. This is used to provide proper sorting of",
" //duplicate keys with nulls, they must be ordered properly ",
" //according to the last field also.",
""
],
"header": "@@ -45,12 +56,24 @@ final class UniqueWithDuplicateNullsMergeSort extends MergeSort {",
"removed": [
" // Compare the columns specified in the column",
" // ordering array."
]
},
{
"added": [
" //set nonull to false if the fields are equal and null"
],
"header": "@@ -64,6 +87,7 @@ final class UniqueWithDuplicateNullsMergeSort extends MergeSort {",
"removed": []
},
{
"added": [],
"header": "@@ -73,5 +97,4 @@ final class UniqueWithDuplicateNullsMergeSort extends MergeSort {",
"removed": [
" "
]
}
]
}
] |
derby-DERBY-3333-9c3b91c3
|
DERBY-3333 User name corresponding to authentication identifier PUBLIC must be rejected
Patch DERBY-3333-roles.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@655948 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/GrantRoleConstantAction.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.conn.Authorizer;"
],
"header": "@@ -28,6 +28,7 @@ import java.util.List;",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/RevokeRoleConstantAction.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.conn.Authorizer;"
],
"header": "@@ -28,6 +28,7 @@ import java.util.List;",
"removed": []
}
]
}
] |
derby-DERBY-3341-f432362d
|
DERBY-3341: For Table Functions, coerce data values coming out of Java ResultSets into the SQL data values declared in the signature of the Table Function.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@636004 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/execute/ResultSetFactory.java",
"hunks": [
{
"added": [
"import org.apache.derby.catalog.TypeDescriptor;",
""
],
"header": "@@ -21,6 +21,8 @@",
"removed": []
},
{
"added": [
"\t\t@param returnType The name of the return type (a multi-set) as a string"
],
"header": "@@ -641,6 +643,7 @@ public interface ResultSetFactory {",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromVTI.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;",
"import org.apache.derby.iapi.services.io.FormatIdOutputStream;"
],
"header": "@@ -21,6 +21,8 @@",
"removed": []
},
{
"added": [
"\t\t// Push the return type",
" if ( isDerbyStyleTableFunction )",
" {",
" String returnType = freezeReturnType( methodCall.getRoutineInfo().getReturnType() );",
" mb.push( returnType );",
" }",
" else",
" {",
"\t\t\tmb.pushNull( String.class.getName());",
" }",
"",
"\t\treturn 16;"
],
"header": "@@ -1323,7 +1325,18 @@ public class FromVTI extends FromTable implements VTIEnvironment",
"removed": [
"\t\treturn 15;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/GenericResultSetFactory.java",
"hunks": [
{
"added": [
"import org.apache.derby.catalog.TypeDescriptor;"
],
"header": "@@ -21,6 +21,7 @@",
"removed": []
},
{
"added": [
" boolean isDerbyStyleTableFunction,",
" String returnType"
],
"header": "@@ -444,7 +445,8 @@ public class GenericResultSetFactory implements ResultSetFactory",
"removed": [
"\t\t\t\t\t\t\t\t\t boolean isDerbyStyleTableFunction"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/VTIResultSet.java",
"hunks": [
{
"added": [
"import org.apache.derby.catalog.TypeDescriptor;"
],
"header": "@@ -21,6 +21,7 @@",
"removed": []
},
{
"added": [
"import org.apache.derby.iapi.types.TypeId;",
"import org.apache.derby.iapi.types.DataTypeDescriptor;",
"import org.apache.derby.iapi.types.VariableSizeDataValue;",
"import org.apache.derby.iapi.services.io.FormatIdInputStream;"
],
"header": "@@ -37,13 +38,17 @@ import org.apache.derby.iapi.sql.execute.NoPutResultSet;",
"removed": []
},
{
"added": [
"import java.io.ByteArrayInputStream;"
],
"header": "@@ -56,6 +61,7 @@ import org.apache.derby.vti.DeferModification;",
"removed": []
},
{
"added": [
" private String returnType;",
"",
" private DataTypeDescriptor[] returnColumnTypes;",
""
],
"header": "@@ -94,6 +100,10 @@ class VTIResultSet extends NoPutResultSetImpl",
"removed": []
},
{
"added": [
"\t\t\t\t boolean isDerbyStyleTableFunction,",
" String returnType"
],
"header": "@@ -114,7 +124,8 @@ class VTIResultSet extends NoPutResultSetImpl",
"removed": [
"\t\t\t\t boolean isDerbyStyleTableFunction"
]
},
{
"added": [
" this.returnType = returnType;"
],
"header": "@@ -129,6 +140,7 @@ class VTIResultSet extends NoPutResultSetImpl",
"removed": []
},
{
"added": [
" DataTypeDescriptor[] columnTypes = null;",
" if ( isDerbyStyleTableFunction )",
" {",
" columnTypes = getReturnColumnTypes();",
" }",
""
],
"header": "@@ -541,6 +553,12 @@ class VTIResultSet extends NoPutResultSetImpl",
"removed": []
},
{
"added": [
"",
" // for Derby-style table functions, coerce the value coming out",
" // of the ResultSet to the declared SQL type of the return",
" // column",
" if ( isDerbyStyleTableFunction )",
" {",
" DataTypeDescriptor dtd = columnTypes[ index ];",
" DataValueDescriptor dvd = columns[ index ];",
"",
" cast( dtd, dvd );",
" }",
"",
" }"
],
"header": "@@ -563,7 +581,19 @@ class VTIResultSet extends NoPutResultSetImpl",
"removed": [
"\t\t\t}"
]
}
]
}
] |
derby-DERBY-3342-7a361f26
|
DERBY-2775 Remove last use of DataTypeDescriptor.setNullability() to make DTD's use immutable wrt nullability. Fixes DERBY-3346 and DERBY-3342 which were cases when a query was getting the incorrect nullability for some columns due to the mutability of DTD when shared across nodes.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@616853 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3343-28ce2663
|
DERBY-3343: Subsequent calls to PreparedStatement cause SQLIntegrityConstraintViolationException on column that is "Generated always"
Patch file: d3343.v3.diff
Modifies the logic in ResultColumn.getOrderableVariantType() so that it behaves
correctly even when a default column is explicitly mentioned in an insert statement.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@615203 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3346-7a361f26
|
DERBY-2775 Remove last use of DataTypeDescriptor.setNullability() to make DTD's use immutable wrt nullability. Fixes DERBY-3346 and DERBY-3342 which were cases when a query was getting the incorrect nullability for some columns due to the mutability of DTD when shared across nodes.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@616853 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3347-a7de0811
|
DERBY-3347: ERROR XSDB3: Container information cannot change once written
On JVMs that support the NIO API, multiple threads may perform I/O
operations concurrently on the same data file. As long as these
operations go through the page cache, only a single thread performs
I/O on a single page at any given time. The data files can also be
accessed by the container cache, which accesses space that it borrows
on the first page in the file. Since these accesses don't go through
the page cache, a mechanism is needed to prevent concurrent access
that page.
This patch makes reading and writing of the first page in a file
synchronize on the container object. Since access to the borrowed
space on the first page also is synchronized on the container,
concurrent I/O on the first page is prevented. (On JVMs that don't
support the NIO API, all page accesses synchronize on the container.)
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@647091 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer4.java",
"hunks": [
{
"added": [
" {",
" // If this is the first alloc page, there may be another thread",
" // accessing the container information in the borrowed space on the",
" // same page. In that case, we synchronize the entire method call, just",
" // like RAFContainer.readPage() does, in order to avoid conflicts. For",
" // all other pages it is safe to skip the synchronization, since",
" // concurrent threads will access different pages and therefore don't",
" // interfere with each other.",
" if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) {",
" synchronized (this) {",
" readPage0(pageNumber, pageData);",
" }",
" } else {",
" readPage0(pageNumber, pageData);",
" }",
" }",
"",
" private void readPage0(long pageNumber, byte[] pageData)",
" throws IOException, StandardException"
],
"header": "@@ -166,6 +166,25 @@ class RAFContainer4 extends RAFContainer {",
"removed": []
},
{
"added": [
" {",
" // If this is the first alloc page, there may be another thread",
" // accessing the container information in the borrowed space on the",
" // same page. In that case, we synchronize the entire method call, just",
" // like RAFContainer.writePage() does, in order to avoid conflicts. For",
" // all other pages it is safe to skip the synchronization, since",
" // concurrent threads will access different pages and therefore don't",
" // interfere with each other.",
" if (pageNumber == FIRST_ALLOC_PAGE_NUMBER) {",
" synchronized (this) {",
" writePage0(pageNumber, pageData, syncPage);",
" }",
" } else {",
" writePage0(pageNumber, pageData, syncPage);",
" }",
" }",
"",
" private void writePage0(long pageNumber, byte[] pageData, boolean syncPage)",
" throws IOException, StandardException"
],
"header": "@@ -232,6 +251,25 @@ class RAFContainer4 extends RAFContainer {",
"removed": []
}
]
}
] |
derby-DERBY-3347-b44572f2
|
DERBY-3347: ERROR XSDB3: Container information cannot change once written
On JVMs that support the NIO API, use NIO consistently when accessing
the data files. Mixing old style I/O and NIO has caused problems on
some platforms since thread-safety is only guaranteed if all access
happens through the FileChannel interface.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@647139 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/FileContainer.java",
"hunks": [
{
"added": [],
"header": "@@ -26,19 +26,11 @@ import org.apache.derby.iapi.reference.Property;",
"removed": [
"import org.apache.derby.impl.store.raw.data.BaseContainer;",
"import org.apache.derby.impl.store.raw.data.BaseContainerHandle;",
"import org.apache.derby.impl.store.raw.data.BasePage;",
"import org.apache.derby.impl.store.raw.data.PageVersion;",
"",
"import org.apache.derby.iapi.services.daemon.DaemonService;",
"import org.apache.derby.iapi.services.daemon.Serviceable;",
"import org.apache.derby.iapi.services.io.FormatIdUtil;"
]
},
{
"added": [],
"header": "@@ -46,11 +38,8 @@ import org.apache.derby.iapi.services.io.TypedFormat;",
"removed": [
"import org.apache.derby.iapi.store.raw.LockingPolicy;",
"import org.apache.derby.iapi.store.raw.Loggable;",
"import org.apache.derby.iapi.store.raw.PageTimeStamp;"
]
},
{
"added": [
"import org.apache.derby.io.StorageRandomAccessFile;"
],
"header": "@@ -69,11 +58,11 @@ import org.apache.derby.iapi.util.ByteArray;",
"removed": [
"import java.io.DataOutput;"
]
},
{
"added": [
" Read the container's header.",
"",
" When this method is called, the embryonic page that is passed in must",
" have been read directly from the file or the input stream, even if the",
" alloc page may still be in cache. This is because a stubbify operation",
" only writes the stub to disk, it does not get rid of any stale page",
" from the page cache. So if it so happens that the stubbified container",
" object is aged out of the container cache but the first alloc page",
" hasn't, then when any stale page of this container wants to be written",
" out, the container needs to be reopened, which is when this routine is",
" called. We must not get the alloc page in cache because it may be",
" stale page and it may still say the container has not been dropped.",
" @param epage the embryonic page to read the header from",
"\tprotected void readHeader(byte[] epage)"
],
"header": "@@ -692,39 +681,33 @@ abstract class FileContainer",
"removed": [
"\t\tRead the container's header. Assumes the input stream (fileData)",
"\t\tis positioned at the beginning of the file.",
"",
"\t\tSubclass that implements openContainer is expected to manufacture a DataInput ",
"\t\tstream which is used here to read the header.",
"\tprotected void readHeader(DataInput fileData) ",
"\t\t// Always read the header from the input stread even if the alloc page may",
"\t\t// still be in cache. This is because a stubbify operation only writes",
"\t\t// the stub to disk, it did not get rid of any stale page from the page",
"\t\t// cache. So if it so happen that the stubbified container object is",
"\t\t// aged out of the container cache but the first alloc page hasn't,",
"\t\t// then when any stale page of this container wants to be written out,",
"\t\t// the container needs to be reopened, which is when this routine is",
"\t\t// called. We must not get the alloc page in cache because it may be",
"\t\t// stale page and it may still say the container has not been dropped.",
"",
"\t\tbyte[] epage = getEmbryonicPage(fileData);",
"",
"",
"\t\tepage = null;"
]
},
{
"added": [
"\t\tWrite the container header directly to file."
],
"header": "@@ -869,8 +852,7 @@ abstract class FileContainer",
"removed": [
"\t\tWrite the container header directly to output stream (fileData).",
"\t\tAssumes the output stream is positioned at the beginning of the file."
]
},
{
"added": [
"\tprotected void writeHeader(StorageRandomAccessFile file,",
" boolean create, byte[] epage)"
],
"header": "@@ -880,7 +862,8 @@ abstract class FileContainer",
"removed": [
"\tprotected void writeHeader(DataOutput fileData, boolean create, byte[] epage)"
]
},
{
"added": [
" writeAtOffset(file, epage, FIRST_ALLOC_PAGE_OFFSET);"
],
"header": "@@ -903,7 +886,7 @@ abstract class FileContainer",
"removed": [
"\t\t\tfileData.write(epage);"
]
},
{
"added": [
" /**",
" * Write a sequence of bytes at the given offset in a file. This method",
" * is not thread safe, so the caller must make sure that no other thread",
" * is performing operations that may change current position in the file.",
" *",
" * @param file the file to write to",
" * @param bytes the bytes to write",
" * @param offset the offset to start writing at",
" * @throws IOException if an I/O error occurs while writing",
" */",
" void writeAtOffset(StorageRandomAccessFile file, byte[] bytes, long offset)",
" throws IOException",
" {",
" file.seek(offset);",
" file.write(bytes);",
" }",
""
],
"header": "@@ -911,6 +894,23 @@ abstract class FileContainer",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer.java",
"hunks": [
{
"added": [
"\t\t\tepage = getEmbryonicPage(file, FIRST_ALLOC_PAGE_OFFSET);"
],
"header": "@@ -679,14 +679,11 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction",
"removed": [
"\t\t\tfile.seek(FIRST_ALLOC_PAGE_OFFSET);",
"\t\t\tepage = getEmbryonicPage(file);",
"",
"\t\tfile.seek(FIRST_ALLOC_PAGE_OFFSET);"
]
},
{
"added": [
" readHeader(getEmbryonicPage(fileData,",
" FIRST_ALLOC_PAGE_OFFSET));"
],
"header": "@@ -1393,8 +1390,8 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction",
"removed": [
" fileData.seek(FIRST_ALLOC_PAGE_OFFSET);",
" readHeader(fileData);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer4.java",
"hunks": [
{
"added": [],
"header": "@@ -24,9 +24,7 @@ package org.apache.derby.impl.store.raw.data;",
"removed": [
"import org.apache.derby.iapi.services.io.FormatIdUtil;",
"import org.apache.derby.impl.store.raw.data.BaseDataFileFactory;"
]
},
{
"added": [
"import org.apache.derby.io.StorageRandomAccessFile;"
],
"header": "@@ -35,6 +33,7 @@ import java.io.RandomAccessFile;",
"removed": []
},
{
"added": [
" /**",
" * Return the {@code FileChannel} for the specified",
" * {@code StorageRandomAccessFile} if it is a {@code RandomAccessFile}.",
" * Otherwise, return {@code null}.",
" *",
" * @param file the file to get the channel for",
" * @return a {@code FileChannel} if {@code file} is an instance of",
" * {@code RandomAccessFile}, {@code null} otherwise",
" */",
" private FileChannel getChannel(StorageRandomAccessFile file) {",
" if (file instanceof RandomAccessFile) {",
" /** XXX - this cast isn't testing friendly.",
" * A testing class that implements StorageRandomAccessFile but isn't",
" * a RandomAccessFile will be \"worked around\" by this class. An",
" * example of such a class is",
" * functionTests/util/corruptio/CorruptRandomAccessFile.java.",
" * An interface rework may be necessary.",
" */",
" return ((RandomAccessFile) file).getChannel();",
" }",
" return null;",
" }",
"",
" /**",
" * <p>",
" * Return the file channel for the current value of the {@code fileData}",
" * field. If {@code fileData} doesn't support file channels, return",
" * {@code null}.",
" * </p>",
" *",
" * <p>",
" * Callers of this method must synchronize on the container object since",
" * two shared fields ({@code fileData} and {@code ourChannel}) are",
" * accessed.",
" * </p>",
" *",
" * @return a {@code FileChannel} object, if supported, or {@code null}",
" */",
" private FileChannel getChannel() {",
" if (ourChannel == null) {",
" ourChannel = getChannel(fileData);",
" }",
" return ourChannel;",
" }",
""
],
"header": "@@ -84,6 +83,51 @@ class RAFContainer4 extends RAFContainer {",
"removed": []
},
{
"added": [
" SanityManager.ASSERT(fileData == null, \"fileData isn't null\");",
" SanityManager.ASSERT(ourChannel == null, \"ourChannel isn't null\");",
" return super.openContainer(newIdentity);"
],
"header": "@@ -95,21 +139,11 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" boolean result = super.openContainer(newIdentity);",
" if (result == true && super.fileData != null &&",
" super.fileData instanceof java.io.RandomAccessFile) {",
" /** XXX - this cast isn't testing friendly.",
" * A testing class that implements StorageRandomAccessFile but isn't",
" * a RandomAccessFile will be \"worked around\" by this class. An",
" * example of such a class is",
" * functionTests/util/corruptio/CorruptRandomAccessFile.java.",
" * An interface rework may be necessary.",
" */",
" ourChannel = ((RandomAccessFile)super.fileData).getChannel();",
" }",
" return result;"
]
},
{
"added": [
" SanityManager.ASSERT(fileData == null, \"fileData isn't null\");",
" SanityManager.ASSERT(ourChannel == null, \"ourChannel isn't null\");"
],
"header": "@@ -119,14 +153,10 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
"",
" if (super.fileData != null &&",
" super.fileData instanceof java.io.RandomAccessFile) {",
" // XXX - see \"XXX\" comment above.",
" ourChannel = ((RandomAccessFile) super.fileData).getChannel();",
" }"
]
},
{
"added": [
" ioChannel = getChannel();"
],
"header": "@@ -188,18 +218,10 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" ioChannel = ourChannel;",
" // If ioChannel == null and fileData supports getChannel()",
" // we have a problem. See this.openContainer(ContainerKey ",
" // newIdentity).",
" SanityManager.ASSERT(! ((ioChannel == null) &&",
" super.fileData instanceof java.io.RandomAccessFile),",
" \"RAFContainer4: New style readPage attempted\" +",
" \" with uninitialized ioChannel\");",
""
]
},
{
"added": [
" synchronized (this) {",
" ioChannel = getChannel();",
""
],
"header": "@@ -272,21 +294,14 @@ class RAFContainer4 extends RAFContainer {",
"removed": [
" synchronized(this) {",
" ioChannel = ourChannel;",
" if (SanityManager.DEBUG) {",
" // If ioChannel == null and fileData supports getChannel()",
" // we have a problem",
" SanityManager.ASSERT(! ((ioChannel == null) &&",
" super.fileData instanceof java.io.RandomAccessFile),",
" \"RAFContainer4: New style writePage attempted \" +",
" \"with uninitialized ioChannel\");",
" }"
]
},
{
"added": [
" /**",
" * Write a sequence of bytes at the given offset in a file.",
" *",
" * @param file the file to write to",
" * @param bytes the bytes to write",
" * @param offset the offset to start writing at",
" * @throws IOException if an I/O error occurs while writing",
" */",
" void writeAtOffset(StorageRandomAccessFile file, byte[] bytes, long offset)",
" throws IOException",
" {",
" FileChannel ioChannel = getChannel(file);",
" if (ioChannel != null) {",
" writeFull(ByteBuffer.wrap(bytes), ioChannel, offset);",
" } else {",
" super.writeAtOffset(file, bytes, offset);",
" }",
" }",
"",
" /**",
" * Read an embryonic page (that is, a section of the first alloc page that",
" * is so large that we know all the borrowed space is included in it) from",
" * the specified offset in a {@code StorageRandomAccessFile}.",
" *",
" * @param file the file to read from",
" * @param offset where to start reading (normally",
" * {@code FileContainer.FIRST_ALLOC_PAGE_OFFSET})",
" * @return a byte array containing the embryonic page",
" * @throws IOException if an I/O error occurs while reading",
" */",
" byte[] getEmbryonicPage(StorageRandomAccessFile file, long offset)",
" throws IOException",
" {",
" FileChannel ioChannel = getChannel(file);",
" if (ioChannel != null) {",
" ByteBuffer buffer =",
" ByteBuffer.allocate(AllocPage.MAX_BORROWED_SPACE);",
" readFull(buffer, ioChannel, offset);",
" return buffer.array();",
" } else {",
" return super.getEmbryonicPage(file, offset);",
" }",
" }"
],
"header": "@@ -398,6 +413,49 @@ class RAFContainer4 extends RAFContainer {",
"removed": []
}
]
}
] |
derby-DERBY-3349-cd8df940
|
DERBY-3349: Nested WHERE EXISTS queries need improved testing
Patch contributed by Thomas Nielsen
Patch file: d3349-3.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628686 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3350-50a8c626
|
DERBY-3350 Ensure that CAST inherits the nullability of its value expression since a cast of a non-null value is always not null. Changes the nullability of a number of JDBC DatabaseMetaData columns since many are derived from CAST expression in the SQL query that forms the result set.
Updated canons where the nullability changed, since if the column display size was less than four but nullable, ij would increase the display size to four to allow for NULL. Once such columns become not nullable their display size drops to less than four.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@616473 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3354-74bac8e0
|
DERBY-3354: Select from large lob table with embedded gives OutOfMemoryError
Contributed by Anurag.
This patch introduces a new WeakHashMap in EmbedConnection. EmbedBlob and EmbedClob objects references are stored in this
map (objects as key and null as value). Adding entry to locater map is
differed till the first call of getLocater.
This ensures that there is entry of LOB objects in locater map if they are invoked in embedded mode.
As the keys of WeakHashMap doesn't prevents the objects from being
garbage collected, once the lob objects are unreferenced lob objects will
be garbage collected releasing the memory.
During commit/rollback or Connection.close, free is invoked on all the lob
objects from WeakHashMap and the map is cleared.
Modified files
java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java
Added a new attribute lobRefrences of type WeakHashMap.
Added a new method addLOBReference to make an entry in new
hash map.
Modified clearLOBMapping to use lobRefrences to fetch and invoke free on lob objects instead of lobHashMap.
java/engine/org/apache/derby/impl/jdbc/EmbedBlob.java
java/engine/org/apache/derby/impl/jdbc/EmbedClob.java
Modified constructs to call connection.lobRefrences instead of conn.addLOBMapping.
Modified getLocater method to check if the locater value is non zero
before returning and if its zero calling conn.addLOBMapping to make
entry of lob objects and getting locater value.
Calling removeLOBMapping in free method.
Cleanup of temporary file is already being taken care by the finalizer of
LOBStreamControl so I haven't added any new cleanup code for
finalizer.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@644755 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedBlob.java",
"hunks": [
{
"added": [
" private int locator;"
],
"header": "@@ -75,7 +75,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": [
" private final int locator;"
]
},
{
"added": [
" con.addLOBReference (this);"
],
"header": "@@ -117,7 +117,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": [
" locator = con.addLOBMapping (this);"
]
},
{
"added": [
" con.addLOBReference (this);"
],
"header": "@@ -193,7 +193,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": [
" this.locator = con.addLOBMapping (this);"
]
},
{
"added": [
" //remove entry from connection",
" localConn.removeLOBMapping(locator);"
],
"header": "@@ -906,6 +906,8 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": []
},
{
"added": [
" if (locator == 0) {",
" locator = localConn.addLOBMapping(this);",
" }"
],
"header": "@@ -1000,6 +1002,9 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": []
}
]
}
] |
derby-DERBY-3354-d96e3980
|
DERBY-3354: Select from large lob table with embedded gives OutOfMemoryError
Contributed by Anurag.
This patch introduces a new WeakHashMap in EmbedConnection. EmbedBlob and EmbedClob objects references are stored in this
map (objects as key and null as value). Adding entry to locater map is
differed till the first call of getLocater.
This ensures that there is entry of LOB objects in locater map if they are invoked in embedded mode.
As the keys of WeakHashMap doesn't prevents the objects from being
garbage collected, once the lob objects are unreferenced lob objects will
be garbage collected releasing the memory.
During commit/rollback or Connection.close, free is invoked on all the lob
objects from WeakHashMap and the map is cleared.
Modified files
java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java
Added a new attribute lobRefrences of type WeakHashMap.
Added a new method addLOBReference to make an entry in new
hash map.
Modified clearLOBMapping to use lobRefrences to fetch and invoke free on lob objects instead of lobHashMap.
java/engine/org/apache/derby/impl/jdbc/EmbedBlob.java
java/engine/org/apache/derby/impl/jdbc/EmbedClob.java
Modified constructs to call connection.lobRefrences instead of conn.addLOBMapping.
Modified getLocater method to check if the locater value is non zero
before returning and if its zero calling conn.addLOBMapping to make
entry of lob objects and getting locater value.
Calling removeLOBMapping in free method.
Cleanup of temporary file is already being taken care by the finalizer of
LOBStreamControl so I haven't added any new cleanup code for
finalizer.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@644764 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedBlob.java",
"hunks": [
{
"added": [
" private int locator;"
],
"header": "@@ -75,7 +75,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": [
" private final int locator;"
]
},
{
"added": [
" con.addLOBReference (this);"
],
"header": "@@ -117,7 +117,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": [
" locator = con.addLOBMapping (this);"
]
},
{
"added": [
" con.addLOBReference (this);"
],
"header": "@@ -193,7 +193,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": [
" this.locator = con.addLOBMapping (this);"
]
},
{
"added": [
" //remove entry from connection",
" localConn.removeLOBMapping(locator);"
],
"header": "@@ -906,6 +906,8 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedClob.java",
"hunks": [
{
"added": [
" private int locator;"
],
"header": "@@ -79,7 +79,7 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB",
"removed": [
" private final int locator;"
]
},
{
"added": [
" con.addLOBReference (this);"
],
"header": "@@ -91,7 +91,7 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB",
"removed": [
" this.locator = con.addLOBMapping (this);"
]
},
{
"added": [
" con.addLOBReference (this);"
],
"header": "@@ -156,7 +156,7 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB",
"removed": [
" this.locator = con.addLOBMapping (this);"
]
},
{
"added": [
" localConn.removeLOBMapping(locator);"
],
"header": "@@ -663,6 +663,7 @@ restartScan:",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java",
"hunks": [
{
"added": [
"import java.util.Map;",
"import java.util.WeakHashMap;"
],
"header": "@@ -65,6 +65,8 @@ import java.sql.SQLException;",
"removed": []
},
{
"added": [
" /**",
" * Map to keep track of all the lobs associated with this",
" * connection. These lobs will be cleared after the transaction",
" * is no longer valid or when connection is closed",
" */",
" private WeakHashMap lobReferences = null;",
""
],
"header": "@@ -131,6 +133,13 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": []
},
{
"added": [
"\t\tMap map = rootConnection.lobReferences;",
" Iterator it = map.keySet ().iterator ();",
" if (rootConnection.lobHashMap != null) {",
" rootConnection.lobHashMap.clear ();",
" }"
],
"header": "@@ -2901,14 +2910,17 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
"\t\tHashMap map = rootConnection.lobHashMap;",
" Iterator it = map.values().iterator();"
]
},
{
"added": [
" /**",
" * Adds an entry of the lob in WeakHashMap. These entries are used",
" * for cleanup during commit/rollback or close.",
" * @param lobReference LOB Object",
" */",
" void addLOBReference (Object lobReference) {",
" if (rootConnection.lobReferences == null) {",
" rootConnection.lobReferences = new WeakHashMap ();",
" }",
" rootConnection.lobReferences.put (lobReference, null);",
" }",
""
],
"header": "@@ -2939,6 +2951,18 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": []
}
]
}
] |
derby-DERBY-3356-334e9a30
|
DERBY-3361: Make the startSlave connection wait until slave replication mode is confirmed started or until an error occurs.
In addition, this patch adds host and port information to a derby.log message and a few exceptions for easier debugging. DERBY-3356 is also fixed by this patch.
Contributed by Jorgen Loland
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618944 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/db/SlaveDatabase.java",
"hunks": [
{
"added": [],
"header": "@@ -31,7 +31,6 @@ import org.apache.derby.iapi.services.context.ContextService;",
"removed": [
"import org.apache.derby.impl.services.monitor.UpdateServiceProperties;"
]
},
{
"added": [
"",
" /** True until this database has been successfully booted. Any",
" * exception that occurs while inBoot is true will be handed to",
" * the client thread booting this database. */",
" private volatile boolean inBoot;",
"",
" /** Set by the database boot thread if it fails before slave mode",
" * has been started properly (i.e., if inBoot is true). This",
" * exception will then be reported to the client connection. */",
" private volatile StandardException bootException;"
],
"header": "@@ -74,6 +73,16 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": []
},
{
"added": [
" inBoot = true;"
],
"header": "@@ -109,6 +118,7 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": []
},
{
"added": [
" // Check that the database was booted successfully, or throw",
" // the exception that caused the boot to fail.",
" verifySuccessfulBoot();",
" inBoot = false;"
],
"header": "@@ -118,21 +128,10 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": [
" try {",
" // We cannot claim to be booted until the storage factory",
" // has been set in the startParams because",
" // TopService.bootModule (the caller of this method) uses",
" // the storage factory object. The storage factory is set",
" // in RawStore.boot, and we have to wait for this to",
" // happen.",
" UpdateServiceProperties usp =",
" (UpdateServiceProperties) startParams;",
" while (usp.getStorageFactory() == null){",
" Thread.sleep(500);",
" }",
" } catch (Exception e) {",
" //Todo: report exception to derby.log",
" }"
]
},
{
"added": [],
"header": "@@ -168,10 +167,6 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": [
" public void setSlaveFactory(SlaveFactory f) {",
" slaveFac = f;",
" }",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/net/ReplicationMessageReceive.java",
"hunks": [
{
"added": [
""
],
"header": "@@ -1,3 +1,4 @@",
"removed": []
},
{
"added": [
"import org.apache.derby.iapi.reference.MessageId;",
"import org.apache.derby.iapi.services.monitor.Monitor;"
],
"header": "@@ -30,7 +31,9 @@ import java.security.PrivilegedActionException;",
"removed": []
},
{
"added": [
" * @param dbname the name of the database",
" public ReplicationMessageReceive(String hostName, int portNumber, ",
" String dbname)",
" Monitor.logTextMessage(MessageId.REPLICATION_SLAVE_NETWORK_LISTEN, ",
" dbname, getHostName(), ",
" String.valueOf(getPort()));",
" // cannot use getPort because SlaveAddress creator threw",
" // exception and has therefore not been initialized",
" String port;",
" if (portNumber > 0) {",
" port = String.valueOf(portNumber);",
" } else {",
" port = String.valueOf(SlaveAddress.DEFAULT_PORT_NO);",
" }",
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, uhe, ",
" dbname, hostName, port);"
],
"header": "@@ -64,17 +67,31 @@ public class ReplicationMessageReceive {",
"removed": [
" public ReplicationMessageReceive(String hostName, int portNumber)",
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, uhe);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/slave/SlaveController.java",
"hunks": [
{
"added": [],
"header": "@@ -25,7 +25,6 @@ package org.apache.derby.impl.services.replication.slave;",
"removed": [
"import org.apache.derby.iapi.reference.Property;"
]
},
{
"added": [
" /** Whether or not this SlaveController has been successfully",
" * started, including setting up a connection with the master and",
" * starting the log receiver thread. The client connection that",
" * initiated slave replication mode on this database will not",
" * report that slave mode was successfully started (i.e., it will",
" * hang) until startupSuccessful has been set to true */",
" private volatile boolean startupSuccessful = false;",
""
],
"header": "@@ -89,6 +88,14 @@ public class SlaveController",
"removed": []
},
{
"added": [],
"header": "@@ -192,10 +199,6 @@ public class SlaveController",
"removed": [
" slaveDb = (SlaveDatabase)",
" Monitor.findService(Property.DATABASE_MODULE, dbname);",
" slaveDb.setSlaveFactory(this);",
""
]
},
{
"added": [
" receiver = new ReplicationMessageReceive(slavehost, slaveport, dbname);"
],
"header": "@@ -213,7 +216,7 @@ public class SlaveController",
"removed": [
" receiver = new ReplicationMessageReceive(slavehost, slaveport);"
]
},
{
"added": [
" startupSuccessful = true;"
],
"header": "@@ -230,6 +233,7 @@ public class SlaveController",
"removed": []
},
{
"added": [
" if (receiver != null) {",
" receiver.tearDown(); ",
" }"
],
"header": "@@ -251,7 +255,9 @@ public class SlaveController",
"removed": [
" receiver.tearDown(); "
]
},
{
"added": [
" /**",
" * @see SlaveFactory#isStarted",
" */",
" public boolean isStarted() {",
" return startupSuccessful;",
" }",
""
],
"header": "@@ -306,6 +312,13 @@ public class SlaveController",
"removed": []
},
{
"added": [
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, e,",
" dbname, slavehost, String.valueOf(receiver.getPort()));"
],
"header": "@@ -339,7 +352,8 @@ public class SlaveController",
"removed": [
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, e, dbname);"
]
},
{
"added": [
" try {",
" stopSlave();",
" } catch (StandardException se) {",
" ReplicationLogger.",
" logError(MessageId.REPLICATION_FATAL_ERROR, se, dbname);",
" }"
],
"header": "@@ -424,7 +438,12 @@ public class SlaveController",
"removed": [
" // todo: rawStoreFactory.stopReplicationSlave();"
]
}
]
},
{
"file": "java/shared/org/apache/derby/shared/common/reference/MessageId.java",
"hunks": [
{
"added": [
" String REPLICATION_SLAVE_NETWORK_LISTEN = \"R011\"; "
],
"header": "@@ -183,6 +183,7 @@ public interface MessageId {",
"removed": []
}
]
}
] |
derby-DERBY-3357-0c7de0fe
|
DERBY-3357 ; improving run time of test DataSourceTest by only creating
database objects once and by deciding in suite methods whether to run with
network server and/or client, rather than within the fixtures.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@619667 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3357-5b20343a
|
DERBY-3357: Reduce runtime of jdbcapi.DataSourceTest. Reduced lock timeouts.
Patch file: derby-3357-1a-lower_lock_timeout.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@617486 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3357-decb600e
|
DERBY-3357; fixing up a few minor mistakes in DataSourceTest - forgot to change
comments referring to renamed (to baseSuite) method getRunTwiceSuite, failed
to add testXAHoldability, and I had tested testAutoCommitOnXAResourceStart
against both client and server but forgot to remove it from the client suite.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@619722 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3358-46ed45c9
|
DERBY-3358: Do not cache Throwable in printStackAndStopMaster
Contributed by V Narayanan.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634694 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/MasterController.java",
"hunks": [
{
"added": [
" * @param e the exception that needs to be handled.",
" private void printStackAndStopMaster(Exception e) {",
" repLogger.logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION, e);",
" } catch (StandardException se) {",
" logError(MessageId.REPLICATION_MASTER_STOPPED, se);"
],
"header": "@@ -538,17 +538,17 @@ public class MasterController",
"removed": [
" * @param t the throwable that needs to be handled.",
" private void printStackAndStopMaster(Throwable t) {",
" repLogger.logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION, t);",
" } catch (Throwable t_stopmaster) {",
" logError(MessageId.REPLICATION_MASTER_STOPPED, t);"
]
}
]
}
] |
derby-DERBY-3358-fc4e76b6
|
DERBY-3358: After an incorrect(unsuccesfull) startMaster comand, further correct startMaster attempts also fail.
Contributed by V Narayanan.
M java/engine/org/apache/derby/impl/services/replication/master/MasterController.java
* Added a variable boolean that indicates whether the master controller is currently running or has
been stopped. I thought I could manage with the earlier boolean variable stopMasterController but
decided against it because it seemed more tied with the stopMaster operation.
* moved start up parameter initialization from boot to startMaster method.
* changed the startMaster, stopMaster and the startFailover methods to take into account the boolean
active.
* replaced the earlier uses of stopMasterController with active.
M java/engine/org/apache/derby/impl/store/raw/RawStore.java
* The replication properties that are part of the bootServiceModule call no longer contain the
startup parameters. These have instead been moved to the startMaster method.
* The replicationProps are empty and not null because a null passed to a bootServiceModule call
results in a NullPointerException.
M java/engine/org/apache/derby/iapi/services/replication/master/MasterFactory.java
* Changed the startMaster declaration to now accept the startup parameters as the method arguments.
M java/engine/org/apache/derby/loc/messages.xml
M java/shared/org/apache/derby/shared/common/reference/SQLState.java
* Added a new messages for the case when the master has already been booted.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634218 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/replication/master/MasterFactory.java",
"hunks": [
{
"added": [],
"header": "@@ -73,19 +73,7 @@ public interface MasterFactory {",
"removed": [
"",
" /** Property key used to specify which slave host to connect to */",
" public static final String SLAVE_HOST =",
" Property.PROPERTY_RUNTIME_PREFIX + \"replication.master.slavehost\";",
"",
" /** Property key to specify which slave port to connect to */",
" public static final String SLAVE_PORT =",
" Property.PROPERTY_RUNTIME_PREFIX + \"replication.master.slaveport\";",
" /** Property key to specify the name of the database */",
" public static final String MASTER_DB =",
" Property.PROPERTY_RUNTIME_PREFIX + \"replication.master.dbname\";",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/MasterController.java",
"hunks": [
{
"added": [
" //Indicates whether the Master Controller is currently",
" //active",
" private boolean active = false;"
],
"header": "@@ -76,8 +76,9 @@ public class MasterController",
"removed": [
" //Set to true when stopMaster is called",
" private boolean stopMasterController = false;"
]
},
{
"added": [
" * Used by Monitor.bootServiceModule to start the service. Currently",
" * only used to set up the replication mode.",
" * correct mode. Currently initializes only the",
" * replicationMode property.",
" //The boot method is loaded only once, because of that the",
" //boot time parameters once wrong would result in repeated",
" //startMaster attempts failing. In order to allow for",
" //multiple start master attempts the slave host name, port",
" //number and the dbname have been moved to the startMaster",
" //method."
],
"header": "@@ -95,33 +96,26 @@ public class MasterController",
"removed": [
" * Used by Monitor.bootServiceModule to start the service. Will:",
" *",
" * Set up basic variables",
" * Connect to the slave using the network service (DERBY-2921)",
" *",
" * Not implemented yet",
" * correct mode",
"",
"",
" slavehost = properties.getProperty(MasterFactory.SLAVE_HOST);",
"",
" String port = properties.getProperty(MasterFactory.SLAVE_PORT);",
" if (port != null) {",
" slaveport = new Integer(port).intValue();",
" }",
"",
" dbname = properties.getProperty(MasterFactory.MASTER_DB);"
]
},
{
"added": [
" * @param slavehost The hostname of the slave",
" * @param slaveport The port the slave is listening on",
" * @param dbname The master database that is being replicated.",
" * 1) thrown on replication startup error",
" * 2) thrown if the master has already been",
" * booted.",
" * 3) thrown if the specified replication mode",
" * is not supported.",
" DataFactory dataFac,",
" LogFactory logFac,",
" String slavehost,",
" int slaveport,",
" String dbname)",
" if (active) {",
" //It is wrong to attempt startMaster on a already",
" //started master.",
" throw StandardException.newException",
" (SQLState.REPLICATION_MASTER_ALREADY_BOOTED, dbname);",
" }",
"",
" this.slavehost = slavehost;",
" this.slaveport = new Integer(slaveport).intValue();",
" this.dbname = dbname;",
""
],
"header": "@@ -171,13 +165,34 @@ public class MasterController",
"removed": [
" * thrown on replication startup error. ",
" DataFactory dataFac, LogFactory logFac) ",
" stopMasterController = false;"
]
},
{
"added": [
" //The master has been started successfully.",
" active = true;",
""
],
"header": "@@ -206,6 +221,9 @@ public class MasterController",
"removed": []
},
{
"added": [
" * Will perform all work that is needed to shut down replication.",
" *",
" * @throws StandardException If the replication master has been stopped",
" * already.",
" public void stopMaster() throws StandardException {",
" if (!active) {",
" throw StandardException.newException",
" (SQLState.REPLICATION_NOT_IN_MASTER_MODE);",
" }",
" active = false;"
],
"header": "@@ -215,10 +233,17 @@ public class MasterController",
"removed": [
" * Will perform all work that is needed to shut down replication",
" public void stopMaster() {",
" stopMasterController = true;"
]
},
{
"added": [
" if (!active) {",
" //It is not correct to stop the master and then attempt a failover.",
" //The control would come here because the master module is already",
" //loaded and a findService for the master module will not fail. But",
" //since this module has been stopped failover does not suceed.",
" throw StandardException.newException",
" (SQLState.REPLICATION_NOT_IN_MASTER_MODE);",
" }",
"",
" //A failover stops the master controller and shuts down",
" //the master database.",
" active = false;"
],
"header": "@@ -240,11 +265,22 @@ public class MasterController",
"removed": [
" stopMasterController = true;"
]
},
{
"added": [
" while (active) {"
],
"header": "@@ -431,7 +467,7 @@ public class MasterController",
"removed": [
" while (!stopMasterController) {"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/RawStore.java",
"hunks": [
{
"added": [],
"header": "@@ -487,10 +487,6 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup",
"removed": [
" replicationProps.setProperty(MasterFactory.MASTER_DB, dbmaster);",
" replicationProps.setProperty(MasterFactory.SLAVE_HOST, host);",
" replicationProps.setProperty(MasterFactory.SLAVE_PORT,",
" new Integer(port).toString());"
]
}
]
},
{
"file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java",
"hunks": [
{
"added": [
" String REPLICATION_MASTER_ALREADY_BOOTED = \"XRE22\";"
],
"header": "@@ -1785,6 +1785,7 @@ public interface SQLState {",
"removed": []
}
]
}
] |
derby-DERBY-336-75fb1cf8
|
Submitting Dyre Tjeldvoll's patch for DERBY-336
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@230900 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/error/StandardException.java",
"hunks": [
{
"added": [
" SanityManager.ASSERT(messageID != null,",
" \"StandardException with no messageID\");"
],
"header": "@@ -89,8 +89,8 @@ public class StandardException extends Exception",
"removed": [
"\t\t\tSanityManager.ASSERT(messageID != null,",
"\t\t\t\t\t\"StandardException with no messageID\");"
]
},
{
"added": [
"",
" /**",
" * Dummy exception to catch incorrect use of",
" * StandardException.newException(), at compile-time. If you get a",
" * compilation error because this exception isn't caught, it means",
" * that you are using StandardException.newException(...)",
" * incorrectly. The nested exception should always be the second",
" * argument.",
" * @see StandardException#newException(String, Object, Throwable)",
" * @see StandardException#newException(String, Object, Object, Throwable)",
" */",
" public static class BadMessageArgumentException extends Throwable {}",
"",
" /**",
" * Dummy overload which should never be called. Only used to",
" * detect incorrect usage, at compile time.",
" * @param messageID - the sql state id of the message",
" * @param a1 - Message arg",
" * @param t - Incorrectly placed exception to be nested",
" * @return nothing - always throws",
" * @throws BadMessageArgumentException - always (dummy)",
" */",
" public static StandardException newException(String messageID, ",
" Object a1, ",
" Throwable t) ",
" throws BadMessageArgumentException {",
" throw new BadMessageArgumentException();",
" }",
""
],
"header": "@@ -321,6 +321,35 @@ public class StandardException extends Exception",
"removed": []
},
{
"added": [
" ",
" /**",
" * Dummy overload which should never be called. Only used to",
" * detect incorrect usage, at compile time.",
" * @param messageID - the sql state id of the message",
" * @param a1 - First message arg",
" * @param a2 - Second message arg",
" * @param t - Incorrectly placed exception to be nested",
" * @return nothing - always throws",
" * @throws BadMessageArgumentException - always (dummy)",
" */",
" public static StandardException newException(String messageID, ",
" Object a1, ",
" Object a2,",
" Throwable t) ",
" throws BadMessageArgumentException {",
" throw new BadMessageArgumentException(); ",
" }",
""
],
"header": "@@ -332,6 +361,25 @@ public class StandardException extends Exception",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/monitor/PersistentServiceImpl.java",
"hunks": [
{
"added": [
" throw StandardException.newException(SQLState.SERVICE_DIRECTORY_CREATE_ERROR, serviceDirectory);"
],
"header": "@@ -673,8 +673,7 @@ final class PersistentServiceImpl implements PersistentService",
"removed": [
" throw StandardException.newException(SQLState.SERVICE_DIRECTORY_CREATE_ERROR,",
" serviceDirectory, null);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/RawStore.java",
"hunks": [
{
"added": [
" renameFailed = true;",
" throw StandardException.",
" newException(SQLState.RAWSTORE_ERROR_RENAMING_FILE,",
" backupcopy, oldbackup);"
],
"header": "@@ -541,10 +541,10 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup",
"removed": [
"\t\t\t\t\trenameFailed = true;",
"\t\t\t\t\tthrow StandardException.newException(",
" SQLState.RAWSTORE_ERROR_RENAMING_FILE,",
" backupcopy, oldbackup, (Throwable)null);"
]
},
{
"added": [
" throw StandardException.",
" newException(SQLState.RAWSTORE_ERROR_COPYING_FILE,",
" dbase, backupcopy);"
],
"header": "@@ -566,9 +566,9 @@ public final class RawStore implements RawStoreFactory, ModuleControl, ModuleSup",
"removed": [
"\t\t\t\tthrow StandardException.newException(",
" SQLState.RAWSTORE_ERROR_COPYING_FILE,",
"\t\t\t\t\tdbase, backupcopy, (Throwable)null);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer.java",
"hunks": [
{
"added": [
" throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se.toString());",
" throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, ioe.toString());"
],
"header": "@@ -909,11 +909,11 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction",
"removed": [
" throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, se);",
" throw StandardException.newException( SQLState.FILE_CREATE_NO_CLEANUP, ioe, file, null);"
]
},
{
"added": [
" throw dataFactory.",
" markCorrupt(StandardException.",
" newException(SQLState.",
" FILE_CONTAINER_EXCEPTION, ",
" ioe, this));"
],
"header": "@@ -973,9 +973,11 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction",
"removed": [
" throw dataFactory.markCorrupt(",
" StandardException.newException(",
" SQLState.FILE_CONTAINER_EXCEPTION, ioe, this, ioe));"
]
},
{
"added": [
" throw dataFactory.",
" markCorrupt(StandardException.",
" newException(SQLState.",
" FILE_CONTAINER_EXCEPTION,",
" ioe2, this));",
" throw dataFactory.",
" markCorrupt(StandardException.",
" newException(SQLState.",
" FILE_CONTAINER_EXCEPTION,",
" ioe, this));"
],
"header": "@@ -995,18 +997,22 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction",
"removed": [
" throw dataFactory.markCorrupt(",
" StandardException.newException(",
" SQLState.FILE_CONTAINER_EXCEPTION, ioe2, this, ioe2));",
" throw dataFactory.markCorrupt(",
" StandardException.newException(",
" SQLState.FILE_CONTAINER_EXCEPTION, ioe, this, ioe));"
]
},
{
"added": [
" throw StandardException.",
" newException(SQLState.FILE_CANNOT_REMOVE_FILE, se, file, ",
" se.toString());"
],
"header": "@@ -1066,8 +1072,9 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction",
"removed": [
" throw StandardException.newException(",
" SQLState.FILE_CANNOT_REMOVE_FILE, se, file, se);"
]
}
]
}
] |
derby-DERBY-3360-0a6ce0bc
|
DERBY-3360 Invalid method java.lang.Integer >> void <init>(short) because java.lang.NoSuchMethodException: java.lang.Integer.<init>(short)
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@679815 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3361-334e9a30
|
DERBY-3361: Make the startSlave connection wait until slave replication mode is confirmed started or until an error occurs.
In addition, this patch adds host and port information to a derby.log message and a few exceptions for easier debugging. DERBY-3356 is also fixed by this patch.
Contributed by Jorgen Loland
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618944 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/db/SlaveDatabase.java",
"hunks": [
{
"added": [],
"header": "@@ -31,7 +31,6 @@ import org.apache.derby.iapi.services.context.ContextService;",
"removed": [
"import org.apache.derby.impl.services.monitor.UpdateServiceProperties;"
]
},
{
"added": [
"",
" /** True until this database has been successfully booted. Any",
" * exception that occurs while inBoot is true will be handed to",
" * the client thread booting this database. */",
" private volatile boolean inBoot;",
"",
" /** Set by the database boot thread if it fails before slave mode",
" * has been started properly (i.e., if inBoot is true). This",
" * exception will then be reported to the client connection. */",
" private volatile StandardException bootException;"
],
"header": "@@ -74,6 +73,16 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": []
},
{
"added": [
" inBoot = true;"
],
"header": "@@ -109,6 +118,7 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": []
},
{
"added": [
" // Check that the database was booted successfully, or throw",
" // the exception that caused the boot to fail.",
" verifySuccessfulBoot();",
" inBoot = false;"
],
"header": "@@ -118,21 +128,10 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": [
" try {",
" // We cannot claim to be booted until the storage factory",
" // has been set in the startParams because",
" // TopService.bootModule (the caller of this method) uses",
" // the storage factory object. The storage factory is set",
" // in RawStore.boot, and we have to wait for this to",
" // happen.",
" UpdateServiceProperties usp =",
" (UpdateServiceProperties) startParams;",
" while (usp.getStorageFactory() == null){",
" Thread.sleep(500);",
" }",
" } catch (Exception e) {",
" //Todo: report exception to derby.log",
" }"
]
},
{
"added": [],
"header": "@@ -168,10 +167,6 @@ public class SlaveDatabase extends BasicDatabase {",
"removed": [
" public void setSlaveFactory(SlaveFactory f) {",
" slaveFac = f;",
" }",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/net/ReplicationMessageReceive.java",
"hunks": [
{
"added": [
""
],
"header": "@@ -1,3 +1,4 @@",
"removed": []
},
{
"added": [
"import org.apache.derby.iapi.reference.MessageId;",
"import org.apache.derby.iapi.services.monitor.Monitor;"
],
"header": "@@ -30,7 +31,9 @@ import java.security.PrivilegedActionException;",
"removed": []
},
{
"added": [
" * @param dbname the name of the database",
" public ReplicationMessageReceive(String hostName, int portNumber, ",
" String dbname)",
" Monitor.logTextMessage(MessageId.REPLICATION_SLAVE_NETWORK_LISTEN, ",
" dbname, getHostName(), ",
" String.valueOf(getPort()));",
" // cannot use getPort because SlaveAddress creator threw",
" // exception and has therefore not been initialized",
" String port;",
" if (portNumber > 0) {",
" port = String.valueOf(portNumber);",
" } else {",
" port = String.valueOf(SlaveAddress.DEFAULT_PORT_NO);",
" }",
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, uhe, ",
" dbname, hostName, port);"
],
"header": "@@ -64,17 +67,31 @@ public class ReplicationMessageReceive {",
"removed": [
" public ReplicationMessageReceive(String hostName, int portNumber)",
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, uhe);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/slave/SlaveController.java",
"hunks": [
{
"added": [],
"header": "@@ -25,7 +25,6 @@ package org.apache.derby.impl.services.replication.slave;",
"removed": [
"import org.apache.derby.iapi.reference.Property;"
]
},
{
"added": [
" /** Whether or not this SlaveController has been successfully",
" * started, including setting up a connection with the master and",
" * starting the log receiver thread. The client connection that",
" * initiated slave replication mode on this database will not",
" * report that slave mode was successfully started (i.e., it will",
" * hang) until startupSuccessful has been set to true */",
" private volatile boolean startupSuccessful = false;",
""
],
"header": "@@ -89,6 +88,14 @@ public class SlaveController",
"removed": []
},
{
"added": [],
"header": "@@ -192,10 +199,6 @@ public class SlaveController",
"removed": [
" slaveDb = (SlaveDatabase)",
" Monitor.findService(Property.DATABASE_MODULE, dbname);",
" slaveDb.setSlaveFactory(this);",
""
]
},
{
"added": [
" receiver = new ReplicationMessageReceive(slavehost, slaveport, dbname);"
],
"header": "@@ -213,7 +216,7 @@ public class SlaveController",
"removed": [
" receiver = new ReplicationMessageReceive(slavehost, slaveport);"
]
},
{
"added": [
" startupSuccessful = true;"
],
"header": "@@ -230,6 +233,7 @@ public class SlaveController",
"removed": []
},
{
"added": [
" if (receiver != null) {",
" receiver.tearDown(); ",
" }"
],
"header": "@@ -251,7 +255,9 @@ public class SlaveController",
"removed": [
" receiver.tearDown(); "
]
},
{
"added": [
" /**",
" * @see SlaveFactory#isStarted",
" */",
" public boolean isStarted() {",
" return startupSuccessful;",
" }",
""
],
"header": "@@ -306,6 +312,13 @@ public class SlaveController",
"removed": []
},
{
"added": [
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, e,",
" dbname, slavehost, String.valueOf(receiver.getPort()));"
],
"header": "@@ -339,7 +352,8 @@ public class SlaveController",
"removed": [
" (SQLState.REPLICATION_CONNECTION_EXCEPTION, e, dbname);"
]
},
{
"added": [
" try {",
" stopSlave();",
" } catch (StandardException se) {",
" ReplicationLogger.",
" logError(MessageId.REPLICATION_FATAL_ERROR, se, dbname);",
" }"
],
"header": "@@ -424,7 +438,12 @@ public class SlaveController",
"removed": [
" // todo: rawStoreFactory.stopReplicationSlave();"
]
}
]
},
{
"file": "java/shared/org/apache/derby/shared/common/reference/MessageId.java",
"hunks": [
{
"added": [
" String REPLICATION_SLAVE_NETWORK_LISTEN = \"R011\"; "
],
"header": "@@ -183,6 +183,7 @@ public interface MessageId {",
"removed": []
}
]
}
] |
derby-DERBY-3362-dd2ce24a
|
DERBY-3362
Changed code to use right interface to get the control row and return null
if either page does not exist or if the background thread would have had to
wait for the page. In either case post commit just skips processing.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634101 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/access/btree/BTreePostCommit.java",
"hunks": [
{
"added": [
" LeafControlRow leaf = null;",
" // The following can fail, returning null, either if it can't get",
" // the latch or somehow the page requested no longer exists. In ",
" // either case the post commit work will just skip it.",
" leaf = (LeafControlRow) ",
" ControlRow.getNoWait(open_btree, page_number);",
" if (leaf == null)"
],
"header": "@@ -454,16 +454,18 @@ class BTreePostCommit implements Serviceable",
"removed": [
" ControlRow controlRow = null; ",
"",
" if ((controlRow = ControlRow.get(open_btree, page_number)) == null)",
" LeafControlRow leaf = (LeafControlRow) controlRow;",
""
]
},
{
"added": [
" if (leaf != null)",
" leaf.release();"
],
"header": "@@ -517,8 +519,8 @@ class BTreePostCommit implements Serviceable",
"removed": [
" if (controlRow != null)",
" controlRow.release();"
]
}
]
}
] |
derby-DERBY-3364-5d5aab5f
|
DERBY-3364: Replication failover implementation must be modified to fail at the master after slave has been stopped
Contributed by V Narayanan
Changes:
M java/engine/org/apache/derby/impl/services/replication/net/ReplicationMessageTransmit.java
* Set a timeout on the socket that is translated as a timeout on the
reads on the I/P streams
M java/engine/org/apache/derby/impl/services/replication/master/MasterController.java
* Stop log shipment and tear down network both with successful and
non-successful failover.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@631534 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/MasterController.java",
"hunks": [
{
"added": [
" //The socket connection that is obtained needs to be torn down.",
" teardownNetwork();"
],
"header": "@@ -274,9 +274,10 @@ public class MasterController",
"removed": [
" logShipper.stopLogShipment();"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/net/ReplicationMessageTransmit.java",
"hunks": [
{
"added": [
" //The reads on the InputStreams obtained from the socket on the",
" //transmitter should not hang indefinitely. Use the timeout",
" //used for the connection establishment here to ensure that the",
" //reads timeout after the timeout period mentioned for the",
" //connection.",
" s.setSoTimeout(timeout_);",
" "
],
"header": "@@ -119,6 +119,13 @@ public class ReplicationMessageTransmit {",
"removed": []
}
]
}
] |
derby-DERBY-3365-456b8a1c
|
DERBY-3365 Network Server stores a duplicate entry in the lob hash map for every lob
Change network server to use existing lob hash map entry instead of creating a second entry.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@617186 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.jdbc.EngineBlob;",
"import org.apache.derby.iapi.jdbc.EngineClob;"
],
"header": "@@ -62,6 +62,8 @@ import org.apache.derby.iapi.services.sanity.SanityManager;",
"removed": []
},
{
"added": [
" * "
],
"header": "@@ -7381,6 +7383,7 @@ class DRDAConnThread extends Thread {",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedBlob.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.jdbc.EngineBlob;"
],
"header": "@@ -24,13 +24,11 @@ package org.apache.derby.impl.jdbc;",
"removed": [
"import org.apache.derby.impl.jdbc.ConnectionChild;",
"import org.apache.derby.impl.jdbc.EmbedConnection;",
"import org.apache.derby.impl.jdbc.Util;"
]
},
{
"added": [
"final class EmbedBlob extends ConnectionChild implements Blob, EngineBlob",
" // locator key for lob. used by Network Server.",
" private final int locator;",
" "
],
"header": "@@ -70,12 +68,15 @@ import java.io.IOException;",
"removed": [
"final class EmbedBlob extends ConnectionChild implements Blob"
]
},
{
"added": [
" locator = con.addLOBMapping (this);"
],
"header": "@@ -116,7 +117,7 @@ final class EmbedBlob extends ConnectionChild implements Blob",
"removed": [
" con.addLOBMapping (this);"
]
},
{
"added": [
" this.locator = con.addLOBMapping (this);"
],
"header": "@@ -192,7 +193,7 @@ final class EmbedBlob extends ConnectionChild implements Blob",
"removed": [
" con.addLOBMapping (this);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedClob.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.jdbc.EngineClob;"
],
"header": "@@ -24,6 +24,7 @@ package org.apache.derby.impl.jdbc;",
"removed": []
},
{
"added": [
"final class EmbedClob extends ConnectionChild implements Clob, EngineClob"
],
"header": "@@ -66,7 +67,7 @@ import java.sql.Clob;",
"removed": [
"final class EmbedClob extends ConnectionChild implements Clob"
]
},
{
"added": [
" private final int locator;",
" "
],
"header": "@@ -78,6 +79,8 @@ final class EmbedClob extends ConnectionChild implements Clob",
"removed": []
},
{
"added": [
" this.locator = con.addLOBMapping (this);"
],
"header": "@@ -88,7 +91,7 @@ final class EmbedClob extends ConnectionChild implements Clob",
"removed": [
" con.addLOBMapping (this);"
]
},
{
"added": [
" this.locator = con.addLOBMapping (this);"
],
"header": "@@ -153,7 +156,7 @@ final class EmbedClob extends ConnectionChild implements Clob",
"removed": [
" con.addLOBMapping (this);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/LOBStoredProcedure.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.jdbc.EngineBlob;",
"import org.apache.derby.iapi.jdbc.EngineClob;"
],
"header": "@@ -26,6 +26,8 @@ import java.sql.Clob;",
"removed": []
},
{
"added": [
" EngineClob clob = (EngineClob) getEmbedConnection().createClob();",
" return clob.getLocator();"
],
"header": "@@ -41,8 +43,8 @@ public class LOBStoredProcedure {",
"removed": [
" Clob clob = getEmbedConnection().createClob();",
" return getEmbedConnection().addLOBMapping(clob);"
]
}
]
}
] |
derby-DERBY-337-1fa1fd04
|
DERBY-337: Enhance dblook to support SQL functions.
1 - Renames "impl/tools/dblook/DB_StoredProcedure.java" to "impl/tools/dblook/DB_Alias.java" because that file now handles stored procedures AND functions AND synonyms, all of which are based on the SYSALIASES system catalog.
2 - Adds logic to new DB_Alias.java file to generate DDL for functions.
3 - Modifies the "toString()" method of the catalog/types/RoutineAliasInfo file to generate a string that is SYNTACTICALLY VALID based on whether an instance of that class is for a PROCEDURE or for a FUNCTION. The reason this change is required is because the "ALIASINFO" column that is returned as part of the SYS.SYSALIASES result set is an instance of RoutineAliasInfo, and thus a call to ResultSet.getString() on the ALIASINFO column ultimately makes a call to RoutineAliasInfo.toString(). That said, the dblook utility makes a "getString()" call on the ALIASINFO column and uses the result to generate the corresponding DDL. Before this patch, the result of the toString() method always corresponded to the PROCEDURE syntax; but now, since dblook is generating DDL for FUNCTIONs, the RoutineAliasInfo.toString() method must recognize if an instance is a PROCEDURE or FUNCTION and return the appropriate syntax.
4 - Adds test cases for FUNCTIONS to the dblook tests and updates the master files accordingly.
Submitted by Army Brown (qozinx@sbcglobal.net)
git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@189650 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/catalog/types/RoutineAliasInfo.java",
"hunks": [
{
"added": [
"\t// What type of alias is this: PROCEDURE or FUNCTION?",
"\tprivate char aliasType;",
""
],
"header": "@@ -88,6 +88,9 @@ public class RoutineAliasInfo extends MethodAliasInfo",
"removed": []
},
{
"added": [
"\t\tsetAliasType();"
],
"header": "@@ -119,6 +122,7 @@ public class RoutineAliasInfo extends MethodAliasInfo",
"removed": []
},
{
"added": [
"\t\tsetAliasType();"
],
"header": "@@ -228,6 +232,7 @@ public class RoutineAliasInfo extends MethodAliasInfo",
"removed": []
},
{
"added": [
"\t/**",
"\t * Get this alias info as a string. NOTE: The \"ALIASINFO\" column",
"\t * in the SYSALIASES table will return the result of this method",
"\t * on a ResultSet.getString() call. That said, since the dblook",
"\t * utility uses ResultSet.getString() to retrieve ALIASINFO and",
"\t * to generate the DDL, THIS METHOD MUST RETURN A STRING THAT",
"\t * IS SYNTACTICALLY VALID, or else the DDL generated by dblook",
"\t * will be incorrect.",
"\t */"
],
"header": "@@ -263,6 +268,15 @@ public class RoutineAliasInfo extends MethodAliasInfo",
"removed": []
},
{
"added": [
"\t\t\tif (aliasType == AliasInfo.ALIAS_TYPE_PROCEDURE_AS_CHAR) {",
"\t\t\t// This is a PROCEDURE. We only want to print the",
"\t\t\t// parameter mode (ex. \"IN\", \"OUT\", \"INOUT\") for procedures--",
"\t\t\t// we don't do it for functions since use of the \"IN\" keyword",
"\t\t\t// is not part of the FUNCTION syntax.",
"\t\t\t\tsb.append(RoutineAliasInfo.parameterMode(parameterModes[i]));",
"\t\t\t\tsb.append(' ');",
"\t\t\t}",
"\t\tif (aliasType == AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR) {",
"\t\t// this a FUNCTION, so syntax requires us to append the return type.",
"\t\t\tsb.append(\" RETURNS \" + returnType.getSQLstring());",
"\t\t}",
"",
"\t\tif ((aliasType == AliasInfo.ALIAS_TYPE_PROCEDURE_AS_CHAR) &&",
"\t\t\t(dynamicResultSets != 0))",
"\t\t{ // Only print dynamic result sets if this is a PROCEDURE",
"\t\t // because it's not valid syntax for FUNCTIONs.",
"\t\tif (aliasType == AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR) {",
"\t\t// this a FUNCTION, so append the syntax telling what to",
"\t\t// do with a null parameter.",
"\t\t\tsb.append(calledOnNullInput ? \" CALLED \" : \" RETURNS NULL \");",
"\t\t\tsb.append(\"ON NULL INPUT\");",
"\t\t}",
"\t\t"
],
"header": "@@ -272,21 +286,42 @@ public class RoutineAliasInfo extends MethodAliasInfo",
"removed": [
"\t\t\tsb.append(RoutineAliasInfo.parameterMode(parameterModes[i]));",
"\t\t\tsb.append(' ');",
"\t\tif (dynamicResultSets != 0) {"
]
}
]
},
{
"file": "java/tools/org/apache/derby/impl/tools/dblook/DB_Alias.java",
"hunks": [
{
"added": [
" Derby - Class org.apache.derby.impl.tools.dblook.DB_Alias"
],
"header": "@@ -1,6 +1,6 @@",
"removed": [
" Derby - Class org.apache.derby.impl.tools.dblook.DB_StoredProcedure"
]
},
{
"added": [
"public class DB_Alias {",
"\t * Generate the DDL for all stored procedures and",
"\t * functions in a given database and write it to",
"\t * output via Logs.java.",
"\tpublic static void doProceduresAndFunctions(Connection conn)",
"\t\t// First do stored procedures.",
"\t\tgenerateDDL(rs, 'P');\t// 'P' => for PROCEDURES",
"",
"\t\t// Now do functions.",
"\t\trs = stmt.executeQuery(\"SELECT ALIAS, ALIASINFO, \" +",
"\t\t\t\"ALIASID, SCHEMAID, JAVACLASSNAME, SYSTEMALIAS FROM SYS.SYSALIASES \" +",
"\t\t\t\"WHERE ALIASTYPE='F'\");",
"\t\tgenerateDDL(rs, 'F');\t// 'F' => for FUNCTIONS",
"",
"\t\trs.close();",
"\t\tstmt.close();",
"\t\treturn;",
"",
"\t}",
"",
"\t/* ************************************************",
"\t * Generate the DDL for either stored procedures or",
"\t * functions in a given database, depending on the",
"\t * the received aliasType.",
"\t * @param rs Result set holding either stored procedures",
"\t * or functions.",
"\t * @param aliasType Indication of whether we're generating",
"\t * stored procedures or functions.",
"\t ****/",
"\tprivate static void generateDDL(ResultSet rs, char aliasType)",
"\t\tthrows SQLException",
"\t{"
],
"header": "@@ -30,26 +30,52 @@ import java.sql.DatabaseMetaData;",
"removed": [
"public class DB_StoredProcedure {",
"\t * Generate the DDL for all stored procedures in a given",
"\t * database.",
"\t * @return The DDL for the stored procedures has been",
"\t * written to output via Logs.java.",
"\tpublic static void doStoredProcedures(Connection conn)"
]
},
{
"added": [
"\t\t\t\tLogs.reportMessage((aliasType == 'P')",
"\t\t\t\t\t? \"DBLOOK_StoredProcHeader\"",
"\t\t\t\t\t: \"DBLOOK_FunctionHeader\");",
"\t\t\tString aliasName = rs.getString(1);",
"\t\t\tString fullName = dblook.addQuotes(",
"\t\t\t\tdblook.expandDoubleQuotes(aliasName));",
"\t\t\tfullName = procSchema + \".\" + fullName;",
"\t\t\tString creationString = createProcOrFuncString(",
"\t\t\t\tfullName, rs, aliasType);"
],
"header": "@@ -64,16 +90,19 @@ public class DB_StoredProcedure {",
"removed": [
"\t\t\t\tLogs.reportMessage(\"DBLOOK_StoredProcHeader\");",
"\t\t\tString procName = rs.getString(1);",
"\t\t\tString procFullName = dblook.addQuotes(",
"\t\t\t\tdblook.expandDoubleQuotes(procName));",
"\t\t\tprocFullName = procSchema + \".\" + procFullName;",
"\t\t\tString creationString = createProcString(procFullName, rs);"
]
}
]
},
{
"file": "java/tools/org/apache/derby/tools/dblook.java",
"hunks": [
{
"added": [
"import org.apache.derby.impl.tools.dblook.DB_Alias;"
],
"header": "@@ -44,7 +44,7 @@ import org.apache.derby.impl.tools.dblook.DB_Jar;",
"removed": [
"import org.apache.derby.impl.tools.dblook.DB_StoredProcedure;"
]
},
{
"added": [
"\t\t\t\tDB_Alias.doProceduresAndFunctions(this.conn);",
"\t\t\tDB_Alias.doSynonyms(this.conn);"
],
"header": "@@ -523,12 +523,12 @@ public class dblook {",
"removed": [
"\t\t\t\tDB_StoredProcedure.doStoredProcedures(this.conn);",
"\t\t\tDB_StoredProcedure.doSynonyms(this.conn);"
]
}
]
}
] |
derby-DERBY-3371-3e92fd96
|
DERBY-3371 (partial) Made T_AccessFactory.alterTable() run with both
temporary and non-temporary conglomerates to get better test coverage
in RAMTransaction
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618299 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/unitTests/store/T_AccessFactory.java",
"hunks": [
{
"added": [
" && alterTable(tc, false)",
" && alterTable(tc, true)"
],
"header": "@@ -144,7 +144,8 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" && alterTable(tc)"
]
},
{
"added": [
" * @param temporary flag which tells whether or not the conglomerate",
" * used in the test should be temporary",
"\tprivate boolean alterTable(TransactionController tc, boolean temporary)"
],
"header": "@@ -1159,12 +1160,13 @@ public class T_AccessFactory extends T_Generic",
"removed": [
"\tprotected boolean alterTable(",
" TransactionController tc)"
]
},
{
"added": [
" int temporaryFlag = temporary ?",
" TransactionController.IS_TEMPORARY :",
" TransactionController.IS_DEFAULT;"
],
"header": "@@ -1173,6 +1175,9 @@ public class T_AccessFactory extends T_Generic",
"removed": []
},
{
"added": [
" temporaryFlag);"
],
"header": "@@ -1180,7 +1185,7 @@ public class T_AccessFactory extends T_Generic",
"removed": [
" TransactionController.IS_DEFAULT); // not temporary"
]
}
]
}
] |
derby-DERBY-3376-52a0f73d
|
DERBY-3376: Test case in GrantRevokeDDLTest commented out
This change adds some additional tests to GrantRevokeDDLTest.
The test cases were originally contributed by Yip Ng (yipng168 at gmail dot com)
The tests were originally written during buddy testing of Derby 10.2, and
were committed as part of DERBY-1736, but were commented out at the
time due to DERBY-1589. The test cases now pass, and so this change
enables the tests as part of GrantRevokeDDLTest.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@831762 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3379-5d59e9db
|
DERBY-3379: "No Current connection" on PooledConnection.getConnection() if pooled connection is reused during connectionClosed processing.
Patch file: derby-3379-3a-minimal_fix_and_test_enabling.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@642942 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/ClientPooledConnection.java",
"hunks": [
{
"added": [
" // Null out the reference to the logical connection that is currently",
" // being closed.",
" this.logicalConnection_ = null;",
""
],
"header": "@@ -335,6 +335,10 @@ public class ClientPooledConnection implements javax.sql.PooledConnection {",
"removed": []
}
]
}
] |
derby-DERBY-3379-f1567ead
|
DERBY-3379 (partial): Updated some comments in buildDss, and converted comments for method finalizeDssLength into JavaDoc.
Patch file: derby-3379-2a-comment_and_JavaDoc.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@637805 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/net/Request.java",
"hunks": [
{
"added": [
" // RQSDSS header is 6 bytes long: (ll)(Cf)(rc)",
" // Save the position of the length bytes, so they can be updated with a",
" // different value at a later time.",
" // Dummy values for the DSS length (token ll above).",
" // The correct length will be inserted when the DSS is finalized.",
" // Insert the mandatory 0xD0 (token C).",
" // Insert the dssType (token f), which also tells if the DSS is chained",
" // or not. See DSSFMT in the DRDA specification for details."
],
"header": "@@ -191,19 +191,21 @@ public class Request {",
"removed": [
" // save the length position and skip",
" // note: the length position is saved so it can be updated",
" // with a different value later.",
" // always turn on chaining flags... this is helpful for lobs...",
" // these bytes will get rest if dss lengths are finalized.",
" // insert the manditory 0xD0 and the dssType"
]
},
{
"added": [
" // Write the request correlation id (two bytes, token rc)."
],
"header": "@@ -212,7 +214,7 @@ public class Request {",
"removed": [
" // write the request correlation id"
]
},
{
"added": [
" /**",
" * Signal the completion of a DSS Layer A object.",
" * <p>",
" * The length of the DSS object will be calculated based on the difference",
" * between the start of the DSS, saved in the variable",
" * {@link #dssLengthLocation_}, and the current offset into the buffer which",
" * marks the end of the data.",
" * <p>",
" * In the event the length requires the use of continuation DSS headers,",
" * one for each 32k chunk of data, the data will be shifted and the",
" * continuation headers will be inserted with the correct values as needed.",
" * Note: In the future, we may try to optimize this approach",
" * in an attempt to avoid these shifts.",
" */"
],
"header": "@@ -821,15 +823,20 @@ public class Request {",
"removed": [
" // signal the completion of a Dss Layer A object. The length of",
" // dss object will be calculated based on the difference between the",
" // start of the dss, saved on the beginDss call, and the current",
" // offset into the buffer which marks the end of the data. In the event",
" // the length requires the use of continuation Dss headers, one for each 32k",
" // chunk of data, the data will be shifted and the continuation headers",
" // will be inserted with the correct values as needed.",
" // Note: In the future, we may try to optimize this approach",
" // in an attempt to avoid these shifts."
]
}
]
}
] |
derby-DERBY-3382-29fd25c8
|
DERBY-3382: Slave must inform master if DBs are out of sync.
Adds a check of the log files to the replication initialization so that replication does not start if the log files are out of synch. The master will be notified whether or not the log files are synched.
Tidies up if starting of replication fails.
Contributed by Jorgen Loland
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@630207 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/AsynchronousLogShipper.java",
"hunks": [
{
"added": [
" /** The highest log instant in failedChunk */",
" private long failedChunkHighestInstant = -1;",
" /** The highest log instant shipped so far */",
" private long highestShippedInstant = -1;"
],
"header": "@@ -93,6 +93,10 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": []
},
{
"added": [
" highestShippedInstant = failedChunkHighestInstant;"
],
"header": "@@ -206,6 +210,7 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": []
},
{
"added": [
" highestShippedInstant = logBuffer.getLastInstant();"
],
"header": "@@ -217,6 +222,7 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": []
},
{
"added": [
" if (mesg != null) {",
" failedChunk = mesg;",
" failedChunkHighestInstant = logBuffer.getLastInstant();",
" }"
],
"header": "@@ -229,7 +235,10 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": [
" failedChunk = (mesg==null) ? failedChunk : mesg;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/MasterController.java",
"hunks": [
{
"added": [
" try {",
" logFactory.startReplicationMasterRole(this);",
" rawStoreFactory.unfreeze();",
" setupConnection();",
" if (replicationMode.equals(MasterFactory.ASYNCHRONOUS_MODE)) {",
" logShipper = new AsynchronousLogShipper(logBuffer,",
" transmitter,",
" this);",
" ((Thread)logShipper).start();",
" }",
" } catch (StandardException se) {",
" // cleanup everything that may have been started before",
" // the exception was thrown",
" ReplicationLogger.logError(MessageId.REPLICATION_FATAL_ERROR, null,",
" dbname);",
" logFactory.stopReplicationMasterRole();",
" teardownNetwork();",
" throw se;"
],
"header": "@@ -183,17 +183,27 @@ public class MasterController",
"removed": [
" logFactory.startReplicationMasterRole(this);",
" rawStoreFactory.unfreeze();",
" setupConnection();",
" if (replicationMode.equals(MasterFactory.ASYNCHRONOUS_MODE)) {",
" logShipper = new AsynchronousLogShipper(logBuffer,",
" transmitter,",
" this);",
" ((Thread)logShipper).start();"
]
},
{
"added": [
" teardownNetwork();"
],
"header": "@@ -212,13 +222,7 @@ public class MasterController",
"removed": [
" ",
" logShipper.stopLogShipment();",
"",
" ReplicationMessage mesg = new ReplicationMessage(",
" ReplicationMessage.TYPE_STOP, null);",
"",
" transmitter.sendMessage(mesg);"
]
},
{
"added": [
" // getHighestShippedInstant is -1 until the first log",
" // chunk has been shipped to the slave. If a log chunk has",
" // been shipped, use the instant of the latest shipped log",
" // record to synchronize log files. If no log has been",
" // shipped yet, use the end position of the log (i.e.,",
" // logToFile.getFirstUnflushedInstantAsLong). ",
" if (logShipper != null && ",
" logShipper.getHighestShippedInstant() != -1) {",
" transmitter.initConnection(SLAVE_CONNECTION_ATTEMPT_TIMEOUT,",
" logShipper.",
" getHighestShippedInstant());",
" } else {",
" transmitter.initConnection(SLAVE_CONNECTION_ATTEMPT_TIMEOUT,",
" logFactory.",
" getFirstUnflushedInstantAsLong());",
" }"
],
"header": "@@ -368,7 +372,22 @@ public class MasterController",
"removed": [
" transmitter.initConnection(SLAVE_CONNECTION_ATTEMPT_TIMEOUT);"
]
},
{
"added": [
" } catch (StandardException se) {",
" throw se;"
],
"header": "@@ -376,6 +395,8 @@ public class MasterController",
"removed": []
},
{
"added": [
"",
" // see comment in setupConnection",
" if (logShipper != null &&",
" logShipper.getHighestShippedInstant() != -1) {",
" transmitter.",
" initConnection(SLAVE_CONNECTION_ATTEMPT_TIMEOUT,",
" logShipper.",
" getHighestShippedInstant());",
" } else {",
" transmitter.",
" initConnection(SLAVE_CONNECTION_ATTEMPT_TIMEOUT,",
" logFactory.",
" getFirstUnflushedInstantAsLong());",
" }",
""
],
"header": "@@ -401,8 +422,21 @@ public class MasterController",
"removed": [
" transmitter.initConnection",
" (SLAVE_CONNECTION_ATTEMPT_TIMEOUT);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/net/ReplicationMessage.java",
"hunks": [
{
"added": [],
"header": "@@ -31,19 +31,6 @@ import java.io.ObjectOutput;",
"removed": [
" *",
" * For now the following message types are defined",
" *",
" * TYPE_LOG - This flag will be used for all messages will carry LogRecords.",
" * TYPE_ACK - this flag is used to send a acknowledgment of successful",
" * completion of a requested operation. It will however not",
" * be used to signify reception for every message transmission",
" * since tcp would automatically take care of this.",
" * TYPE_ERROR - Indicates that the requested operation was not able to be",
" * completed successfully.",
" * TYPE_INITIATE - used during the intial handshake between the master and",
" * the slave. The initial handshake helps to negotiate the",
" * message UIDs and send a appropriate error or acknowledgment."
]
},
{
"added": [
" /**",
" * used during the intial handshake between the master and",
" * the slave. The initial handshake helps to negotiate the",
" * message UIDs and send a appropriate error or acknowledgment.",
" * The object this message contains will be a <code>Long</code>.",
" * IMPORTANT: This constant must not be changed in future versions since ",
" * we need it to decide slave/master version mismatch",
" */",
" public static final int TYPE_INITIATE_VERSION = 0;",
"",
" /**",
" * Used during the intial handshake between the master and",
" * the slave. Messages of this type are used to ensure that master and ",
" * slave have identical log files by checking that they will insert ",
" * the next log record on the same byte position in the log.",
" * The object this message contains will be a <code>Long</code>.",
" */",
" public static final int TYPE_INITIATE_INSTANT = 1;",
"",
" public static final int TYPE_LOG = 10;"
],
"header": "@@ -65,13 +52,32 @@ public class ReplicationMessage implements Externalizable {",
"removed": [
" public static final int TYPE_LOG = 0;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/net/ReplicationMessageReceive.java",
"hunks": [
{
"added": [
"import org.apache.derby.impl.store.raw.log.LogCounter;"
],
"header": "@@ -34,6 +34,7 @@ import org.apache.derby.iapi.error.StandardException;",
"removed": []
},
{
"added": [
" * @param synchOnInstant the slave log instant, used to check that",
" * the master and slave log files are in synch. If no chunks of log",
" * records have been received from the master yet, this is the",
" * end position in the current log file. If a chunk of log has been",
" * received, this is the instant of the log record received last.",
" * Note that there is a difference!",
" * @param dbname the name of the replicated database"
],
"header": "@@ -105,6 +106,13 @@ public class ReplicationMessageReceive {",
"removed": []
},
{
"added": [
" public void initConnection(int timeout, long synchOnInstant, String dbname)",
" throws"
],
"header": "@@ -117,7 +125,8 @@ public class ReplicationMessageReceive {",
"removed": [
" public void initConnection(int timeout) throws"
]
},
{
"added": [
" // exchange initiator messages to check that master and slave are at ",
" // the same version...",
" parseAndAckVersion(readMessage(), dbname);",
" // ...and have equal log files",
" parseAndAckInstant(readMessage(), synchOnInstant, dbname);"
],
"header": "@@ -143,20 +152,11 @@ public class ReplicationMessageReceive {",
"removed": [
" //wait for the initiator message on the SocketConnection",
" ReplicationMessage initMesg = readMessage();",
" ",
" //Check if this message is an initiator message, if not",
" //throw an exception",
" if (initMesg.getType() != ReplicationMessage.TYPE_INITIATE) {",
" //The message format was not recognized. Hence throw",
" //an unexpected exception.",
" throw StandardException.newException",
" (SQLState.REPLICATION_UNEXPECTED_EXCEPTION);",
" }",
" ",
" //parse the initiator message and perform appropriate action",
" parseInitiatorMessage(initMesg);"
]
},
{
"added": [
" * @param dbname the name of the replicated database",
" private void parseAndAckVersion(ReplicationMessage initiatorMessage, ",
" String dbname)",
"",
" //Check if this message is an initiate version message, if not",
" //throw an exception",
" if (initiatorMessage.getType() != ",
" ReplicationMessage.TYPE_INITIATE_VERSION) {",
" // The message format was not recognized. Notify master and throw",
" // an exception",
" String expectedMsgId = String.",
" valueOf(ReplicationMessage.TYPE_INITIATE_VERSION);",
" String receivedMsgId = String.valueOf(initiatorMessage.getType());",
" handleUnexpectedMessage(dbname, expectedMsgId, receivedMsgId);",
" }",
""
],
"header": "@@ -204,17 +204,32 @@ public class ReplicationMessageReceive {",
"removed": [
" private void parseInitiatorMessage(ReplicationMessage initiatorMessage)"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/net/ReplicationMessageTransmit.java",
"hunks": [
{
"added": [
" * @param synchOnInstant the master log instant, used to check",
" * that the master and slave log files are in synch. If no chunks",
" * of log records have been shipped to the slave yet, this is the",
" * end position in the current log file. If a chunk of log has",
" * been shipped, this is the instant of the log record shipped",
" * last. Note that there is a difference!"
],
"header": "@@ -76,6 +76,12 @@ public class ReplicationMessageTransmit {",
"removed": []
},
{
"added": [
" public void initConnection(int timeout, long synchOnInstant) throws"
],
"header": "@@ -89,7 +95,7 @@ public class ReplicationMessageTransmit {",
"removed": [
" public void initConnection(int timeout) throws"
]
},
{
"added": [
" sendInitiatorAndReceiveAck(synchOnInstant);",
" /**",
" * Tear down the network connection established with the",
" * other replication peer",
" *",
" * @throws IOException if an exception occurs while trying to tear",
" * down the network connection",
" */",
" public void tearDown() throws IOException {",
" socketConn.tearDown();",
" }",
""
],
"header": "@@ -116,9 +122,20 @@ public class ReplicationMessageTransmit {",
"removed": [
" sendInitiatorAndReceiveAck();"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/slave/SlaveController.java",
"hunks": [
{
"added": [
"import org.apache.derby.impl.store.raw.log.LogCounter;"
],
"header": "@@ -31,6 +31,7 @@ import org.apache.derby.iapi.services.monitor.ModuleSupportable;",
"removed": []
},
{
"added": [
" /** The instant of the latest log record received from the master ",
" * and processed so far. Used to check that master and slave log files ",
" * are in synch */",
" private volatile long highestLogInstant = -1;",
"",
" /**",
" * Whether or not replication slave mode is still on. Will be set",
" * to false when slave replication is shut down. The value of this",
" * variable is checked after every timeout when trying to set up a",
" * connection to the master, and by the thread that applies log",
" * chunks received from the master. */"
],
"header": "@@ -81,11 +82,17 @@ public class SlaveController",
"removed": [
" // Whether or not replication slave mode is still on. Will be set",
" // to false when slave replication is shut down. The value of this",
" // variable is checked after every timeout when trying to set up a",
" // connection to the master, and by the thread that applies log",
" // chunks received from the master."
]
},
{
"added": [
" logToFile.stopReplicationSlaveRole();"
],
"header": "@@ -262,8 +269,7 @@ public class SlaveController",
"removed": [
" logToFile.flushAll();",
" logToFile.stopReplicationSlaveMode();"
]
},
{
"added": [
" // highestLogInstant is -1 until the first log chunk has",
" // been received from the master. If a log chunk has been",
" // received, use the instant of the latest received log",
" // record to synchronize log files. If no log has been",
" // received yet, use the end position of the log (i.e.,",
" // logToFile.getFlushedInstant)",
" if (highestLogInstant != -1) {",
" // timeout to check if still in replication slave mode",
" receiver.initConnection(DEFAULT_SOCKET_TIMEOUT,",
" highestLogInstant,",
" dbname);",
" } else {",
" // timeout to check if still in replication slave mode",
" receiver.initConnection(DEFAULT_SOCKET_TIMEOUT,",
" logToFile.",
" getFirstUnflushedInstantAsLong(),",
" dbname);",
" }"
],
"header": "@@ -337,8 +343,24 @@ public class SlaveController",
"removed": [
" // timeout to check if still in replication slave mode",
" receiver.initConnection(DEFAULT_SOCKET_TIMEOUT);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/raw/log/LogToFile.java",
"hunks": [
{
"added": [
" public synchronized long getFirstUnflushedInstantAsLong() {",
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(logFileNumber > 0 && lastFlush > 0);",
" }",
" return LogCounter.makeLogInstantAsLong(logFileNumber,lastFlush);",
" }"
],
"header": "@@ -4352,6 +4352,12 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport",
"removed": []
}
]
},
{
"file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java",
"hunks": [
{
"added": [
" String REPLICATION_UNEXPECTED_MESSAGEID = \"XRE12\";"
],
"header": "@@ -1775,6 +1775,7 @@ public interface SQLState {",
"removed": []
}
]
}
] |
derby-DERBY-3382-9e93a53d
|
DERBY-3382: Adds regression test for out of sync checking between master and slave.
Contributed by Jorgen Loland
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@641221 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3383-dd55fd82
|
DERBY-3383: Replication: Make sure stopSlave does not boot the database if not already booted.
Contributed by Jorgen Loland
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@620444 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java",
"hunks": [
{
"added": [
" // DERBY-3383: stopSlave must be performed before",
" // bootDatabase so that we don't accidentally boot the db",
" // if stopSlave is requested on an unbooted db",
" if (isStopReplicationSlaveBoot(info)) {",
" // An exception is always thrown from this method. If",
" // stopSlave is requested, we never get past this point",
" handleStopReplicationSlave(database, info);",
" } else if (isInternalShutdownSlaveDatabase(info)) {",
" internalStopReplicationSlave(database, info);",
" return;",
" }",
""
],
"header": "@@ -265,6 +265,18 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": []
},
{
"added": [],
"header": "@@ -291,13 +303,6 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
"\t\t\tif (isStopReplicationSlaveBoot(info)) {",
"\t\t\t\thandleStopReplicationSlave(info);",
"\t\t\t} else if (isInternalShutdownSlaveDatabase(info)) {",
"\t\t\t\tinternalStopReplicationSlave(info);",
"\t\t\t\treturn;",
"\t\t\t}",
""
]
},
{
"added": [
" *",
" * @param database The database the stop slave operation will be",
" * performed on",
" * @exception SQLException Thrown if the database has not been",
" * booted or if stopSlave is performed successfully",
" private void handleStopReplicationSlave(Database database, Properties p)",
" // Cannot get the database by using getTR().getDatabase()",
" // because getTR().setDatabase() has not been called in the",
" // constructor at this point.",
" if (database == null) {",
" // Do not clear the TransactionResource context. It will",
" // be restored as part of the finally clause of the constructor.",
" this.setInactive();",
" throw newSQLException(SQLState.REPLICATION_NOT_IN_SLAVE_MODE);"
],
"header": "@@ -712,25 +717,32 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
" * ",
" * @exception SQLException Thrown if the database is not found",
" private void handleStopReplicationSlave(Properties p)",
" if (getTR().getDatabase() == null) {",
" handleDBNotFound();",
" Database database = getTR().getDatabase();",
""
]
},
{
"added": [
" * @param database The database the internal stop slave operation",
" * will be performed on",
" * @exception SQLException Thrown if the database has not been",
" * booted or if this connection was not made internally from",
" * SlaveDatabase",
" private void internalStopReplicationSlave(Database database, Properties p)",
" // Cannot get the database by using getTR().getDatabase()",
" // because getTR().setDatabase() has not been called in the",
" // constructor at this point.",
" if (database == null) {",
" // Do not clear the TransactionResource context. It will",
" // be restored as part of the finally clause of the constructor.",
" this.setInactive();",
" throw newSQLException(SQLState.REPLICATION_NOT_IN_SLAVE_MODE);",
" // We should only get here if the connection is made from",
" // inside SlaveDatabase. To verify, we ask SlaveDatabase",
" // if it requested this shutdown. If it didn't,",
" // verifyShutdownSlave will throw an exception",
" if (! (database instanceof SlaveDatabase)) {",
" throw newSQLException(SQLState.REPLICATION_NOT_IN_SLAVE_MODE);",
" ((SlaveDatabase)database).verifyShutdownSlave();",
"",
" // Will shutdown the database without writing to the log",
" // since the SQLException with state",
" // REPLICATION_SLAVE_SHUTDOWN_OK will be reported anyway",
" handleException(tr.shutdownDatabaseException());"
],
"header": "@@ -746,41 +758,45 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
" * @exception SQLException Thrown if the database is not found",
" private void internalStopReplicationSlave(Properties p)",
" if (getTR().getDatabase() == null) {",
" handleDBNotFound();",
" Database database = getTR().getDatabase();",
"",
" if (isInternalShutdownSlaveDatabase(p)) {",
" // We should only get here if the connection is made from",
" // inside SlaveDatabase. To verify, we ask SlaveDatabase",
" // if it requested this shutdown. If it didn't,",
" // verifyShutdownSlave will throw an exception",
" if (! (database instanceof SlaveDatabase)) {",
" throw newSQLException(",
" SQLState.REPLICATION_NOT_IN_SLAVE_MODE,",
" getTR().getDBName());",
" }",
" ((SlaveDatabase)database).verifyShutdownSlave();",
"",
" // Will shutdown the database without writing to the log",
" // since the SQLException with state",
" // REPLICATION_SLAVE_SHUTDOWN_OK will be reported anyway",
" handleException(tr.shutdownDatabaseException());"
]
}
]
}
] |
derby-DERBY-3388-1d0892eb
|
DERBY-3388: Improve message handling for replication messages to derby.log
Add timestamp to replication messages written to derby.log and
introduce the following configurable properties:
* derby.replication.verbose -> true/false - replication messages are
written to log
* derby.replication.logBufferSize -> the size of the replication log
buffers
* derby.replication.minLogShippingInterval -> the shortest interval
between two consecutive log shipments
* derby.replication.maxLogShippingInterval -> the longest interval
between two consecutive log shipments (a "soft" guarantee that the
slave will not deviate more than this amount of millis from the
master)
Contributed by Jørgen Løland <Jorgen.Loland@Sun.COM>.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@634586 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/replication/ReplicationLogger.java",
"hunks": [
{
"added": [
"import java.util.Date;",
"import org.apache.derby.iapi.reference.Property;",
"import org.apache.derby.iapi.services.property.PropertyUtil;",
" * \"derby.replication.verbose=false\".",
" private final boolean verbose;",
" /** The name of the replicated database */",
" private final String dbname;",
"",
" public ReplicationLogger(String dbname) {",
" verbose = PropertyUtil.getSystemBoolean(Property.REPLICATION_VERBOSE,",
" true);",
" this.dbname = dbname;",
" }",
" * log (usually derby.log) provided that verbose",
" * is true. If verbose is false, nothing is",
" public void logError(String msgId, Throwable t) {",
" if (verbose) {",
" Monitor.logTextMessage(MessageId.REPLICATION_ERROR_BEGIN,",
" new Date());"
],
"header": "@@ -22,36 +22,45 @@",
"removed": [
" * \"derby.replication.logerrormessages=true\"",
" // TODO: make this configurable through the aforementioned",
" // property",
" private static final boolean LOG_REPLICATION_MESSAGES = true;",
" * log (usually derby.log) provided that LOG_REPLICATION_MESSAGES",
" * is true. If LOG_REPLICATION_MESSAGES is false, nothing is",
" * @param dbname The name of the replicated database",
" public static void logError(String msgId, Throwable t, String dbname) {",
" if (LOG_REPLICATION_MESSAGES) {",
" Monitor.logTextMessage(MessageId.REPLICATION_ERROR_BEGIN);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/AsynchronousLogShipper.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.reference.Property;",
"import org.apache.derby.iapi.services.property.PropertyUtil;",
"import org.apache.derby.iapi.services.sanity.SanityManager;",
"import org.apache.derby.impl.services.replication.ReplicationLogger;"
],
"header": "@@ -25,8 +25,12 @@ import java.io.IOException;",
"removed": []
},
{
"added": [
" /**",
" * Minimum interval (in milliseconds) between log shipping.",
" * Defaults to MIN, but can be configured using system property",
" * derby.replication.minLogShippingInterval",
" * @see MIN",
" */",
" private long minShippingInterval;",
" /**",
" * Minimum interval (in milliseconds) between log shipping.",
" * Defaults to MAX, but can be configured using system property",
" * derby.replication.maxLogShippingInterval",
" * @see MAX",
" */",
" private long maxShippingInterval;"
],
"header": "@@ -68,6 +72,20 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": []
},
{
"added": [
" private final ReplicationLogger repLogger;",
""
],
"header": "@@ -128,6 +146,8 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": []
},
{
"added": [
" * @param repLogger The replication logger that will write messages to",
" * the log file (typically derby.log)",
" ReplicationMessageTransmit transmitter,",
" MasterController masterController,",
" ReplicationLogger repLogger) {",
" this.repLogger = repLogger;",
"",
" getLogShipperProperties();",
" shippingInterval = minShippingInterval;",
""
],
"header": "@@ -138,16 +158,23 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": [
" ReplicationMessageTransmit transmitter,",
" MasterController masterController) {",
" shippingInterval = MIN;"
]
},
{
"added": [
" * c) Else If the time elapsed since last ship is greater than",
" * minShippingInterval"
],
"header": "@@ -322,7 +349,8 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": [
" * c) Else If the time elapsed since last ship is greater than MIN"
]
},
{
"added": [
" (System.currentTimeMillis() - lastShippingTime) >",
" minShippingInterval) {"
],
"header": "@@ -332,7 +360,8 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": [
" (System.currentTimeMillis() - lastShippingTime) > MIN) {"
]
},
{
"added": [
" * b) FI > FI_LOW and FI < FI_HIGH return minShippingInterval",
" * c) FI <= FI_LOW return maxShippingInterval."
],
"header": "@@ -345,8 +374,8 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": [
" * b) FI > FI_LOW and FI < FI_HIGH return MIN",
" * c) FI <= FI_LOW return MAX."
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/MasterController.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.property.PropertyUtil;",
"import org.apache.derby.iapi.services.sanity.SanityManager;",
"import org.apache.derby.iapi.reference.Property;"
],
"header": "@@ -30,6 +30,9 @@ import org.apache.derby.iapi.reference.SQLState;",
"removed": []
},
{
"added": [
" private static final int LOG_BUFFER_SIZE_MIN = 8192; //8KB",
" private static final int LOG_BUFFER_SIZE_MAX = 1024*1024; //1MB"
],
"header": "@@ -63,6 +66,8 @@ public class MasterController",
"removed": []
},
{
"added": [
" private ReplicationLogger repLogger;",
" private int logBufferSize = 0;"
],
"header": "@@ -70,11 +75,13 @@ public class MasterController",
"removed": []
},
{
"added": [
"",
" repLogger = new ReplicationLogger(dbname);",
" getMasterProperties();",
" logBuffer = new ReplicationLogBuffer(logBufferSize, this);"
],
"header": "@@ -196,7 +203,10 @@ public class MasterController",
"removed": [
" logBuffer = new ReplicationLogBuffer(DEFAULT_LOG_BUFFER_SIZE, this);"
]
},
{
"added": [
" this,",
" repLogger);",
" repLogger.logError(MessageId.REPLICATION_FATAL_ERROR, se);"
],
"header": "@@ -208,14 +218,14 @@ public class MasterController",
"removed": [
" this);",
" ReplicationLogger.logError(MessageId.REPLICATION_FATAL_ERROR, null,",
" dbname);"
]
},
{
"added": [
" repLogger.",
" logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION, ioe);",
" repLogger.",
" logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION, se);"
],
"header": "@@ -248,13 +258,11 @@ public class MasterController",
"removed": [
" ReplicationLogger.",
" logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION,",
" ioe, dbname);",
" ReplicationLogger.",
" logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION, ",
" se, dbname);"
]
},
{
"added": [
"",
" /**",
" * Load relevant system property: replication log buffer size",
" */",
" private void getMasterProperties() {",
" logBufferSize =",
" PropertyUtil.getSystemInt(Property.REPLICATION_LOG_BUFFER_SIZE,",
" DEFAULT_LOG_BUFFER_SIZE);",
"",
" if (logBufferSize < LOG_BUFFER_SIZE_MIN) {",
" logBufferSize = LOG_BUFFER_SIZE_MIN;",
" if (SanityManager.DEBUG) {",
" repLogger.logText(\"Replication log buffer size \" +",
" \"property too small. Set to \" +",
" \"minimum value: \" + logBufferSize,",
" false);",
" }",
" } else if (logBufferSize > LOG_BUFFER_SIZE_MAX) {",
" logBufferSize = LOG_BUFFER_SIZE_MAX;",
" if (SanityManager.DEBUG) {",
" repLogger.logText(\"Replication log buffer size \" +",
" \"property too big. Set to \" +",
" \"maximum value: \" + logBufferSize,",
" false);",
" }",
" }",
" }"
],
"header": "@@ -331,6 +339,33 @@ public class MasterController",
"removed": []
},
{
"added": [
" repLogger.logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION,",
" exception);"
],
"header": "@@ -462,9 +497,8 @@ public class MasterController",
"removed": [
" ReplicationLogger.",
" logError(MessageId.REPLICATION_LOGSHIPPER_EXCEPTION, ",
" exception, dbname);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/replication/slave/SlaveController.java",
"hunks": [
{
"added": [
" private ReplicationLogger repLogger;"
],
"header": "@@ -74,6 +74,7 @@ public class SlaveController",
"removed": []
},
{
"added": [
" repLogger = new ReplicationLogger(dbname);"
],
"header": "@@ -138,6 +139,7 @@ public class SlaveController",
"removed": []
},
{
"added": [
" repLogger.logError(MessageId.REPLICATION_SLAVE_LOST_CONN, e);"
],
"header": "@@ -368,8 +370,7 @@ public class SlaveController",
"removed": [
" ReplicationLogger.",
" logError(MessageId.REPLICATION_SLAVE_LOST_CONN, e, dbname);"
]
},
{
"added": [
" repLogger.logError(MessageId.REPLICATION_FATAL_ERROR, e);",
" repLogger.logError(MessageId.REPLICATION_FATAL_ERROR, se);"
],
"header": "@@ -428,15 +429,13 @@ public class SlaveController",
"removed": [
" ReplicationLogger.",
" logError(MessageId.REPLICATION_FATAL_ERROR, e, dbname);",
" ReplicationLogger.",
" logError(MessageId.REPLICATION_FATAL_ERROR, se, dbname);"
]
}
]
},
{
"file": "java/shared/org/apache/derby/shared/common/reference/MessageId.java",
"hunks": [
{
"added": [
" String REPLICATION_ONELINE_MSG_HEADER = \"R013\";"
],
"header": "@@ -190,6 +190,7 @@ public interface MessageId {",
"removed": []
}
]
}
] |
derby-DERBY-339-88c25bba
|
DERBY-339 Network client XA should only keep XA state for transaction branch association
Network client XA should only keep XA state for transaction branch association, to track whether to send commit in autocommit mode. All other state and state related decisions should be defered to the server.
The client tries to track XA state to make decisions based on current XA state. Most of this state handling was removed with DERBY246, but it still was not being handled properly. This is evidenced by multiple failures in xaSimplePostive that now that it gets past DERBY-246.
This fix will have the client track only branch association as outlined in the XA+ specification. Table 6-2, State Table for Transaction Branch Association. The client will track only
XA_TO_NOT_ASSOCIATED
XA_T1_ASSOCIATED
Association Suspended (T2) will map to XA_TO_NOT_ASSOCIATED for the client's pupurposes.
The client commit in autocommit mode only for
XA_TO_NOT_ASSOCIATED.
git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@189710 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/Connection.java",
"hunks": [
{
"added": [
" // The client needs to keep track of the connection's transaction branch association",
" // per table 2.6 in the XA+ specification in order to determine if commits should flow in",
" // autocommit mode. There is no need to keep track of suspended transactions separately from",
" // XA_TO_NOT_ASSOCIATED.",
" // ",
" /**",
" * <code>XA_T0_NOT_ASSOCIATED</code>",
" * This connection is not currently associated with an XA transaction",
" * In this state commits will flow in autocommit mode.",
" */",
" public static final int XA_T0_NOT_ASSOCIATED = 0; ",
" ",
" /**",
" * <code>XA_T1_ASSOCIATED</code>",
" * In this state commits will not flow in autocommit mode.",
" */",
" public static final int XA_T1_ASSOCIATED = 1; ",
" ",
" //TODO: Remove XA_RECOVER entirely once indoubtlist is gone. ",
" protected int xaState_ = XA_T0_NOT_ASSOCIATED;"
],
"header": "@@ -85,13 +85,29 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" public static final int XA_LOCAL = 0; // No global transaction in process",
" public static final int XA_GLOBAL = 1; // Global transaction in process",
" //TODO: Remove entirely once indoubtlist is gone. ",
" protected int xaState_ = XA_LOCAL;"
]
},
{
"added": [
" if ((xaState_ == XA_T0_NOT_ASSOCIATED) ) {"
],
"header": "@@ -524,7 +540,7 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" if ((xaState_ == XA_LOCAL) ) {"
]
},
{
"added": [
" if ((xaState_ == XA_T0_NOT_ASSOCIATED) ) {"
],
"header": "@@ -541,7 +557,7 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" if ((xaState_ == XA_LOCAL) ) {"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/am/Statement.java",
"hunks": [
{
"added": [
" return (connection_.xaState_ == Connection.XA_T0_NOT_ASSOCIATED) ;"
],
"header": "@@ -1189,7 +1189,7 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface",
"removed": [
" return (connection_.xaState_ == Connection.XA_LOCAL) ;"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetConnection.java",
"hunks": [
{
"added": [
" \tif (xaState_ == XA_T0_NOT_ASSOCIATED) {"
],
"header": "@@ -1367,7 +1367,7 @@ public class NetConnection extends org.apache.derby.client.am.Connection {",
"removed": [
" \tif (xaState_ == XA_LOCAL) {"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetXAConnection.java",
"hunks": [
{
"added": [
" if (xaState == XA_T0_NOT_ASSOCIATED){"
],
"header": "@@ -132,7 +132,7 @@ public class NetXAConnection extends org.apache.derby.client.net.NetConnection {",
"removed": [
" if (xaState == XA_LOCAL){"
]
},
{
"added": [
" if (xaState == XA_T0_NOT_ASSOCIATED) {"
],
"header": "@@ -147,7 +147,7 @@ public class NetXAConnection extends org.apache.derby.client.net.NetConnection {",
"removed": [
" if (xaState == XA_LOCAL) {"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetXAResource.java",
"hunks": [
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -183,7 +183,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [],
"header": "@@ -215,9 +215,6 @@ public class NetXAResource implements XAResource {",
"removed": [
" else {",
" \tconn_.setXAState(Connection.XA_LOCAL);",
" }"
]
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -247,7 +244,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [
" }else {",
" \tconn_.setXAState(Connection.XA_T0_NOT_ASSOCIATED);"
],
"header": "@@ -278,8 +275,9 @@ public class NetXAResource implements XAResource {",
"removed": [
""
]
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -299,7 +297,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -352,7 +350,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -381,7 +379,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -447,7 +445,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -512,7 +510,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [
" "
],
"header": "@@ -543,9 +541,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" else {",
" \tconn_.setXAState(Connection.XA_LOCAL);",
" }"
]
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -585,7 +581,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
},
{
"added": [
" conn_.setXAState(Connection.XA_T1_ASSOCIATED);"
],
"header": "@@ -609,7 +605,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" conn_.setXAState(Connection.XA_GLOBAL);"
]
},
{
"added": [],
"header": "@@ -626,7 +622,6 @@ public class NetXAResource implements XAResource {",
"removed": [
" // By default, throwXAException will reset the state of the failed connection"
]
},
{
"added": [
" setXaStateForXAException(rc); ",
"",
" /**",
" * Reset the transaction branch association state to XA_T0_NOT_ASSOCIATED",
" * for XAER_RM* and XA_RB* Exceptions. All other exeptions leave the state ",
" * unchanged",
" * ",
" * @param rc // return code from XAException",
" * @throws XAException",
" */",
" private void setXaStateForXAException(int rc) {",
" \tswitch (rc)",
"\t\t{",
" \t// Reset to T0, not associated for XA_RB*, RM*",
" // XAER_RMFAIL and XAER_RMERR will be fatal to the connection",
" // but that is not dealt with here",
" case javax.transaction.xa.XAException.XAER_RMFAIL:",
" case javax.transaction.xa.XAException.XAER_RMERR:",
" case javax.transaction.xa.XAException.XA_RBROLLBACK:",
" case javax.transaction.xa.XAException.XA_RBCOMMFAIL:",
" case javax.transaction.xa.XAException.XA_RBDEADLOCK:",
" case javax.transaction.xa.XAException.XA_RBINTEGRITY:",
" case javax.transaction.xa.XAException.XA_RBOTHER:",
" case javax.transaction.xa.XAException.XA_RBPROTO:",
" case javax.transaction.xa.XAException.XA_RBTIMEOUT:",
" case javax.transaction.xa.XAException.XA_RBTRANSIENT:",
" \tconn_.setXAState(Connection.XA_T0_NOT_ASSOCIATED);",
" break;",
" // No change for other XAExceptions",
" // javax.transaction.xa.XAException.XA_NOMIGRATE",
" //javax.transaction.xa.XAException.XA_HEURHAZ",
" // javax.transaction.xa.XAException.XA_HEURCOM",
" // javax.transaction.xa.XAException.XA_HEURRB",
" // javax.transaction.xa.XAException.XA_HEURMIX",
" // javax.transaction.xa.XAException.XA_RETRY",
" // javax.transaction.xa.XAException.XA_RDONLY",
" // javax.transaction.xa.XAException.XAER_ASYNC",
" // javax.transaction.xa.XAException.XAER_NOTA",
" // javax.transaction.xa.XAException.XAER_INVAL ",
" // javax.transaction.xa.XAException.XAER_PROTO",
" // javax.transaction.xa.XAException.XAER_DUPID",
" // javax.transaction.xa.XAException.XAER_OUTSIDE \t",
" default:",
" \t\t\t return;",
"\t\t}\t",
" }",
""
],
"header": "@@ -735,9 +730,56 @@ public class NetXAResource implements XAResource {",
"removed": []
},
{
"added": [
" if (conn_.isPhysicalConnClosed()) {"
],
"header": "@@ -745,7 +787,7 @@ public class NetXAResource implements XAResource {",
"removed": [
" if (conn_.isPhysicallyClosed()) {"
]
}
]
}
] |
derby-DERBY-3390-b75cf01c
|
DERBY-3390; preventing ClassCastException and disconnect on SQLException thrown from a user function
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@700948 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
"\t\t\tsqlerrmc = buildTokenizedSqlerrmc(se);"
],
"header": "@@ -6005,7 +6005,7 @@ class DRDAConnThread extends Thread {",
"removed": [
"\t\t\tsqlerrmc = buildTokenizedSqlerrmc((EmbedSQLException) se);"
]
}
]
}
] |
derby-DERBY-3391-58f22ba5
|
DERBY-3391 Remove dead code that restored isolation level during a compile, since isolation cannot be changed by a compile
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@619981 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/CompilerContextImpl.java",
"hunks": [
{
"added": [],
"header": "@@ -617,22 +617,6 @@ public class CompilerContextImpl extends ContextImpl",
"removed": [
"\t/**",
"\t * @see CompilerContext#setEntryIsolationLevel",
"\t */",
"\tpublic void setEntryIsolationLevel(int isolationLevel)",
"\t{",
"\t\tthis.entryIsolationLevel = isolationLevel;",
"\t}",
"",
"\t/**",
"\t * @see CompilerContext#getScanIsolationLevel",
"\t */",
"\tpublic int getEntryIsolationLevel()",
"\t{",
"\t\treturn entryIsolationLevel;",
"\t}",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java",
"hunks": [
{
"added": [],
"header": "@@ -1914,9 +1914,6 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t// Save off the current isolation level on entry so that it gets restored",
"\t\tcc.setEntryIsolationLevel( getCurrentIsolationLevel());",
""
]
},
{
"added": [],
"header": "@@ -1954,9 +1951,6 @@ public class GenericLanguageConnectionContext",
"removed": [
"\t\t// Restore the isolation level at the time of entry to CompilerContext",
"\t\tisolationLevel = cc.getEntryIsolationLevel();",
""
]
}
]
}
] |
derby-DERBY-3397-683c673d
|
DERBY-3397: Derby 10.3.1.4 and 10.3.2.1 break scrollable result sets? Hibernate Query.setFirstResult and/or setMaxResults.
Added two simple regression tests.
Patch file: derby-3397-2a-junit_reg_test.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@650786 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3398-841c8990
|
DERBY-5546 ResultSet#updateBigDecimal on a REAL column does not do underflow checking
Patch derby-5546-2. For both Real and Double, check for underflow. For
Double underflow is currently detected but only because we didn't fix
DERBY-3398 yet, so we introduce the same check now as for Real. Once
DERBY-3398 it will no longer be redundant. The tests are still guarded
by a check for embedded until DERBY-5534 is fixed.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1447996 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/SQLDouble.java",
"hunks": [
{
"added": [
"import java.math.BigDecimal;"
],
"header": "@@ -36,6 +36,7 @@ import java.io.ObjectOutput;",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/types/SQLReal.java",
"hunks": [
{
"added": [
"import java.math.BigDecimal;"
],
"header": "@@ -36,6 +36,7 @@ import java.io.ObjectOutput;",
"removed": []
}
]
}
] |
derby-DERBY-3400-8935ca15
|
DERBY-3400 testgetTypeInfo Fails with ibm16 while running the UpgradeTests
Workaround for IBM JVM issue. Make new arrays for ODBC nullability instead of doing arrayCopy.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628151 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3401-2f817f3f
|
Add commented out code that exposes the bug described by DERBY-3401. Removing a listener from a pooled connection during logical connection close processing causes other listeners to be ignored.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@619991 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3401-431eec83
|
DERBY-3401 (partial) Fixed ConcurrentModificationException if a
statement event listener adds or removes a listener on the same pooled
connection.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@660959 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/ClientPooledConnection40.java",
"hunks": [
{
"added": [
"import java.util.concurrent.CopyOnWriteArrayList;"
],
"header": "@@ -23,7 +23,7 @@ package org.apache.derby.client;",
"removed": [
"import java.util.ArrayList;"
]
},
{
"added": [
" /**",
" * List of statement event listeners. The list is copied on each write,",
" * ensuring that it can be safely iterated over even if other threads or",
" * the listeners fired in the same thread add or remove listeners.",
" */",
" private final CopyOnWriteArrayList<StatementEventListener>",
" statementEventListeners =",
" new CopyOnWriteArrayList<StatementEventListener>();"
],
"header": "@@ -38,10 +38,14 @@ import org.apache.derby.jdbc.ClientBaseDataSource;",
"removed": [
" /** List of statement event listeners. */",
" //@GuardedBy(\"this\")",
" private final ArrayList<StatementEventListener> statementEventListeners = ",
" new ArrayList<StatementEventListener>();"
]
},
{
"added": [
" public void addStatementEventListener(StatementEventListener listener) {"
],
"header": "@@ -73,7 +77,7 @@ public class ClientPooledConnection40 extends ClientPooledConnection {",
"removed": [
" public synchronized void addStatementEventListener(StatementEventListener listener){"
]
},
{
"added": [
" public void removeStatementEventListener(StatementEventListener listener) {"
],
"header": "@@ -89,7 +93,7 @@ public class ClientPooledConnection40 extends ClientPooledConnection {",
"removed": [
" public synchronized void removeStatementEventListener(StatementEventListener listener){"
]
},
{
"added": [
" public void onStatementClose(PreparedStatement statement) {"
],
"header": "@@ -104,7 +108,7 @@ public class ClientPooledConnection40 extends ClientPooledConnection {",
"removed": [
" public synchronized void onStatementClose(PreparedStatement statement) {"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/ClientXAConnection40.java",
"hunks": [
{
"added": [
"import java.util.concurrent.CopyOnWriteArrayList;"
],
"header": "@@ -23,14 +23,9 @@ package org.apache.derby.client;",
"removed": [
"import java.util.Enumeration;",
"import java.util.Vector;",
"import org.apache.derby.client.am.SqlException;",
"import org.apache.derby.client.net.NetLogWriter;",
"import org.apache.derby.client.net.NetXAConnection;",
"import org.apache.derby.jdbc.ClientDataSource;"
]
},
{
"added": [
" /**",
" * List of statement event listeners. The list is copied on each write,",
" * ensuring that it can be safely iterated over even if other threads or",
" * the listeners fired in the same thread add or remove listeners.",
" */",
" private final CopyOnWriteArrayList<StatementEventListener>",
" statementEventListeners =",
" new CopyOnWriteArrayList<StatementEventListener>();"
],
"header": "@@ -38,10 +33,14 @@ import org.apache.derby.jdbc.ClientXADataSource;",
"removed": [
" //using generics to avoid casting problems",
" protected final Vector<StatementEventListener> statementEventListeners = ",
" new Vector<StatementEventListener>();",
""
]
},
{
"added": [
" statementEventListeners.remove(listener);"
],
"header": "@@ -73,7 +72,7 @@ public class ClientXAConnection40 extends ClientXAConnection {",
"removed": [
" statementEventListeners.removeElement(listener);"
]
},
{
"added": [
" statementEventListeners.add(listener);"
],
"header": "@@ -92,7 +91,7 @@ public class ClientXAConnection40 extends ClientXAConnection {",
"removed": [
" statementEventListeners.addElement(listener);"
]
},
{
"added": [
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);"
],
"header": "@@ -103,12 +102,8 @@ public class ClientXAConnection40 extends ClientXAConnection {",
"removed": [
" //synchronized block on statementEventListeners to make it thread",
" //safe",
" synchronized(statementEventListeners) {",
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);",
" }"
]
}
]
},
{
"file": "java/engine/org/apache/derby/jdbc/EmbedPooledConnection40.java",
"hunks": [
{
"added": [
"import java.util.concurrent.CopyOnWriteArrayList;"
],
"header": "@@ -21,11 +21,9 @@",
"removed": [
"import java.sql.Connection;",
"import java.util.Enumeration;",
"import java.util.Vector;"
]
},
{
"added": [
"",
" /**",
" * List of statement event listeners. The list is copied on each write,",
" * ensuring that it can be safely iterated over even if other threads or",
" * the listeners fired in the same thread add or remove listeners.",
" */",
" private final CopyOnWriteArrayList<StatementEventListener>",
" statementEventListeners =",
" new CopyOnWriteArrayList<StatementEventListener>();"
],
"header": "@@ -41,11 +39,15 @@ import javax.sql.StatementEventListener;",
"removed": [
" ",
" //using generics to avoid casting problems",
" protected final Vector<StatementEventListener> statementEventListeners =",
" new Vector<StatementEventListener>();",
" "
]
},
{
"added": [
" statementEventListeners.remove(listener);"
],
"header": "@@ -66,7 +68,7 @@ class EmbedPooledConnection40 extends EmbedPooledConnection {",
"removed": [
" statementEventListeners.removeElement(listener);"
]
},
{
"added": [
" statementEventListeners.add(listener);"
],
"header": "@@ -89,7 +91,7 @@ class EmbedPooledConnection40 extends EmbedPooledConnection {",
"removed": [
" statementEventListeners.addElement(listener);"
]
},
{
"added": [
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);"
],
"header": "@@ -100,12 +102,8 @@ class EmbedPooledConnection40 extends EmbedPooledConnection {",
"removed": [
" //synchronized block on statementEventListeners to make it thread",
" //safe",
" synchronized(statementEventListeners) {",
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);",
" }"
]
}
]
},
{
"file": "java/engine/org/apache/derby/jdbc/EmbedXAConnection40.java",
"hunks": [
{
"added": [
"import java.util.concurrent.CopyOnWriteArrayList;"
],
"header": "@@ -21,9 +21,9 @@",
"removed": [
"import java.util.Vector;"
]
},
{
"added": [
" /**",
" * List of statement event listeners. The list is copied on each write,",
" * ensuring that it can be safely iterated over even if other threads or",
" * the listeners fired in the same thread add or remove listeners.",
" */",
" private final CopyOnWriteArrayList<StatementEventListener>",
" statementEventListeners =",
" new CopyOnWriteArrayList<StatementEventListener>();"
],
"header": "@@ -35,9 +35,14 @@ import org.apache.derby.iapi.jdbc.ResourceAdapter;",
"removed": [
" //using generics to avoid casting problems",
" protected final Vector<StatementEventListener> statementEventListeners =",
" new Vector<StatementEventListener>();"
]
},
{
"added": [
" statementEventListeners.remove(listener);"
],
"header": "@@ -67,7 +72,7 @@ final class EmbedXAConnection40 extends EmbedXAConnection",
"removed": [
" statementEventListeners.removeElement(listener);"
]
},
{
"added": [
" statementEventListeners.add(listener);"
],
"header": "@@ -90,7 +95,7 @@ final class EmbedXAConnection40 extends EmbedXAConnection",
"removed": [
" statementEventListeners.addElement(listener);"
]
},
{
"added": [
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);"
],
"header": "@@ -101,12 +106,8 @@ final class EmbedXAConnection40 extends EmbedXAConnection",
"removed": [
" //synchronized block on statementEventListeners to make it thread",
" //safe",
" synchronized(statementEventListeners) {",
" for (StatementEventListener l : statementEventListeners) {",
" l.statementClosed(event);",
" }"
]
}
]
}
] |
derby-DERBY-3401-af3cf0c5
|
DERBY-3401: Removing a ConnectionEventListener from a PooledConnection during its connectionClosed() callback causes other ConnectionEventListener callbacks to be missed
Clone the list of listeners if it is modified while the listeners are executing.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@663641 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/ClientPooledConnection.java",
"hunks": [
{
"added": [
" /** List of {@code ConnectionEventListener}s. Never {@code null}. */",
" private ArrayList listeners_ = new ArrayList();",
"",
" /**",
" * The number of iterators going through the list of connection event",
" * listeners at the current time. Only one thread may be iterating over the",
" * list at any time (because of synchronization), but a single thread may",
" * have multiple iterators if for instance an event listener performs",
" * database calls that trigger a new event.",
" */",
" private int eventIterators;",
""
],
"header": "@@ -46,7 +46,18 @@ public class ClientPooledConnection implements javax.sql.PooledConnection {",
"removed": [
" private ArrayList listeners_ = null;"
]
},
{
"added": [],
"header": "@@ -85,7 +96,6 @@ public class ClientPooledConnection implements javax.sql.PooledConnection {",
"removed": [
" listeners_ = new ArrayList();"
]
},
{
"added": [],
"header": "@@ -138,7 +148,6 @@ public class ClientPooledConnection implements javax.sql.PooledConnection {",
"removed": [
" listeners_ = new ArrayList();"
]
},
{
"added": [
" if (eventIterators > 0) {",
" // DERBY-3401: Someone is iterating over the ArrayList, and since",
" // we were able to synchronize on this, that someone is us. Clone",
" // the list of listeners in order to prevent invalidation of the",
" // iterator.",
" listeners_ = (ArrayList) listeners_.clone();",
" }"
],
"header": "@@ -302,6 +311,13 @@ public class ClientPooledConnection implements javax.sql.PooledConnection {",
"removed": []
},
{
"added": [
" if (eventIterators > 0) {",
" // DERBY-3401: Someone is iterating over the ArrayList, and since",
" // we were able to synchronize on this, that someone is us. Clone",
" // the list of listeners in order to prevent invalidation of the",
" // iterator.",
" listeners_ = (ArrayList) listeners_.clone();",
" }"
],
"header": "@@ -310,6 +326,13 @@ public class ClientPooledConnection implements javax.sql.PooledConnection {",
"removed": []
},
{
"added": [
" fireConnectionEventListeners(null);"
],
"header": "@@ -328,12 +351,7 @@ public class ClientPooledConnection implements javax.sql.PooledConnection {",
"removed": [
" for (Iterator e = listeners_.iterator(); e.hasNext();) {",
" ConnectionEventListener listener =",
" (ConnectionEventListener)e.next();",
" ConnectionEvent event = new ConnectionEvent(this);",
" listener.connectionClosed(event);",
" }"
]
}
]
},
{
"file": "java/engine/org/apache/derby/jdbc/EmbedPooledConnection.java",
"hunks": [
{
"added": [
"import java.util.ArrayList;",
"import java.util.Iterator;"
],
"header": "@@ -43,12 +43,10 @@ import java.sql.Statement;",
"removed": [
"import java.util.Vector;",
"import java.util.Enumeration;",
"import javax.sql.DataSource;",
"import javax.sql.PooledConnection;"
]
},
{
"added": [
"",
" /**",
" * The list of {@code ConnectionEventListener}s. It is initially {@code",
" * null} and will be initialized lazily when the first listener is added.",
" */",
" private ArrayList eventListener;",
"",
" /**",
" * The number of iterators going through the list of connection event",
" * listeners at the current time. Only one thread may be iterating over the",
" * list at any time (because of synchronization), but a single thread may",
" * have multiple iterators if for instance an event listener performs",
" * database calls that trigger a new event.",
" */",
" private int eventIterators;"
],
"header": "@@ -75,8 +73,21 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne",
"removed": [
" ",
"\tprivate Vector eventListener; // who wants to know I am closed or error"
]
},
{
"added": [
"\t\t\tArrayList tmpEventListener = eventListener;"
],
"header": "@@ -191,7 +202,7 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne",
"removed": [
"\t\t\tVector tmpEventListener = eventListener;"
]
},
{
"added": [
" if (eventListener == null) {",
" eventListener = new ArrayList();",
" } else if (eventIterators > 0) {",
" // DERBY-3401: Someone is iterating over the ArrayList, and since",
" // we were able to synchronize on this, that someone is us. Clone",
" // the list of listeners in order to prevent invalidation of the",
" // iterator.",
" eventListener = (ArrayList) eventListener.clone();",
" }",
" eventListener.add(listener);"
],
"header": "@@ -271,9 +282,16 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne",
"removed": [
"\t\tif (eventListener == null)",
"\t\t\teventListener = new Vector();",
"\t\teventListener.addElement(listener);"
]
},
{
"added": [
" if (listener == null || eventListener == null) {",
" }",
" if (eventIterators > 0) {",
" // DERBY-3401: Someone is iterating over the ArrayList, and since",
" // we were able to synchronize on this, that someone is us. Clone",
" // the list of listeners in order to prevent invalidation of the",
" // iterator.",
" eventListener = (ArrayList) eventListener.clone();",
" }",
" eventListener.remove(listener);"
],
"header": "@@ -281,10 +299,17 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne",
"removed": [
"\t\tif (listener == null)",
"\t\tif (eventListener != null)",
"\t\t\teventListener.removeElement(listener);"
]
},
{
"added": [
" fireConnectionEventListeners(exception);",
" /**",
" * Fire all the {@code ConnectionEventListener}s registered. Callers must",
" * synchronize on {@code this} to prevent others from modifying the list of",
" * listeners.",
" *",
" * @param exception the exception that caused the event, or {@code null} if",
" * it is a close event",
" */",
" private void fireConnectionEventListeners(SQLException exception) {",
" if (eventListener != null && !eventListener.isEmpty()) {",
" ConnectionEvent event = new ConnectionEvent(this, exception);",
" eventIterators++;",
" try {",
" for (Iterator it = eventListener.iterator(); it.hasNext();) {",
" ConnectionEventListener l =",
" (ConnectionEventListener) it.next();",
" if (exception == null) {",
" l.connectionClosed(event);",
" } else {",
" l.connectionErrorOccurred(event);",
" }",
" }",
" } finally {",
" eventIterators--;",
" }",
" }",
" }"
],
"header": "@@ -323,22 +348,36 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne",
"removed": [
"\t\tif (eventListener != null && eventListener.size() > 0)",
"\t\t{",
"\t\t\tConnectionEvent errorEvent = new ConnectionEvent(this, exception);",
"",
"\t\t\tfor (Enumeration e = eventListener.elements();",
"\t\t\t\t e.hasMoreElements(); )",
"\t\t\t{",
"\t\t\t\tConnectionEventListener l =",
"\t\t\t\t\t(ConnectionEventListener)e.nextElement();",
"\t\t\t\tl.connectionErrorOccurred(errorEvent);",
"\t\t\t}",
"\t\t}",
"",
" "
]
}
]
}
] |
derby-DERBY-3404-5a17d020
|
DERBY-3404: EmbedResultSet.getString() returns wrong value after auto-commit with CLOSE_CURSORS_AT_COMMIT
Made EmbedResultSet.checkIfClose() detect that the language ResultSet
had been closed because of the auto-commit, and throw an SQLException.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629712 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java",
"hunks": [
{
"added": [],
"header": "@@ -2240,7 +2240,6 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": [
"\t//4)Make sure underneath language resultset is not closed"
]
},
{
"added": [],
"header": "@@ -2251,13 +2250,6 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": [
" //in case of autocommit on, if there was an exception which caused runtime rollback in this transaction prior to this call,",
" //the rollback code will mark the language resultset closed (it doesn't mark the JDBC ResultSet closed).",
" //That is why alongwith the earlier checkIfClosed call in this method, there is a check for language resultset close as well.",
"",
" //4)Make sure underneath language resultset is not closed",
" if (theResults.isClosed())",
" throw Util.generateCsSQLException(SQLState.LANG_RESULT_SET_NOT_OPEN, methodName);"
]
},
{
"added": [],
"header": "@@ -2273,7 +2265,6 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": [
" * 4) Make sure underneath language resultset is not closed"
]
},
{
"added": [],
"header": "@@ -2287,11 +2278,6 @@ public abstract class EmbedResultSet extends ConnectionChild",
"removed": [
"",
" // 4)Make sure underneath language resultset is not closed",
" if (theResults.isClosed()) {",
" throw Util.generateCsSQLException(SQLState.LANG_RESULT_SET_NOT_OPEN, \"insertRow\");",
" }"
]
}
]
}
] |
derby-DERBY-3406-d588984f
|
DERBY-3406: Fix unsynchronized calls to notify
Contributed by V Narayanan
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628078 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/replication/master/AsynchronousLogShipper.java",
"hunks": [
{
"added": [
" if (fi >= FI_HIGH || ",
" (System.currentTimeMillis() - lastShippingTime) > MIN) {",
" synchronized (this) {",
" notify();",
" }"
],
"header": "@@ -313,11 +313,11 @@ public class AsynchronousLogShipper extends Thread implements",
"removed": [
" if (fi >= FI_HIGH) {",
" notify();",
" } else if ((System.currentTimeMillis() - lastShippingTime) > MIN) {",
" // Minimum MIN time between messages unless buffer is almost full",
" notify();"
]
}
]
}
] |
derby-DERBY-3412-7fe51c32
|
DERBY-3306 (+DERBY-3412): jdbc4.StatementEventsTest cannot be run individually in a clean environment.
This commit backs out the previous patch committed for this issue (revision 620480). Manual work was required because J2EEDataSourceTest has been split off DataSourceTest in the mean time.
This patch also fixes StatementEventsTest by setting the createDatabase to create.
Patch file: derby-3306-2a-backout_and_alternative_fix.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629894 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/JDBCDataSource.java",
"hunks": [
{
"added": [
" * configuration. The getConnection() method will return",
" * a connection identical to TestConfiguration.openDefaultConnection()."
],
"header": "@@ -37,9 +37,8 @@ public class JDBCDataSource {",
"removed": [
" * configuration.",
" * <p>The getConnection() method will return a connection identical to",
" * {@link TestConfiguration#openDefaultConnection}."
]
},
{
"added": [],
"header": "@@ -85,10 +84,6 @@ public class JDBCDataSource {",
"removed": [
" * <p>",
" * If no properties are passed in, defaults are obtained from the",
" * current <code>TestConfiguration</code> and the data source will be",
" * configured to create the specified database if it does not exist."
]
},
{
"added": [],
"header": "@@ -101,10 +96,6 @@ public class JDBCDataSource {",
"removed": [
" * <p>",
" * If no properties are passed in, defaults are obtained from the",
" * current <code>TestConfiguration</code> and the data source will be",
" * configured to create the specified database if it does not exist."
]
},
{
"added": [],
"header": "@@ -132,8 +123,6 @@ public class JDBCDataSource {",
"removed": [
" // By default non-existing databases will be created.",
" beanProperties.put(\"createDatabase\", \"create\");"
]
}
]
}
] |
derby-DERBY-3414-002bf942
|
DERBY-3304 and DERBY-3414
This serves as a test case for both the jira entries above.
Number of code changes for transaction ending time went in as part of DERBY-3304 and this
new test will check those code changes for specific case of rollback inside a java procedure
call.
The test case is currently disabled for network server because the rollback inside the
procedure is not closing all the resultsets and DERBY-3414 is to track this behavior of
network server. Once DERBY-3414 is fixed, we should enable the test for network server.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@627673 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3415-8f89c096
|
DERBY-3415: 'DataSourceSerializationTestjunit.framework.AssertionFailedError' on Windows.
Added closing of the FileInputStream to the test. Without it, the test failed on Windows platforms during tear down, and it is of course good practice to always clean up :)
Patch file: derby-3415-1a.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@627734 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3417-84fa650c
|
DERBY-4175 Instability in some replication tests under load, since tests don't wait long enough for final state or anticipate intermediate states
Patch derby-4175-3 (+ resolved some conflicts arising from commit of DERBY-3417).
It makes three replication tests less sensitive to load by making
them accept intermediate states without failing or wait for longer
before giving up on seeing the final end state of a replication state
change.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@769602 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3417-8c9d41ec
|
DERBY-3417 slave side stop in a client server mode results in SQLState printed without proper error message
Patch derby-3417-2.
A set of replication errors have been made session level (they are),
also having the effect of preformatting the error message strings on
the server, solving this issue. Also removed usage of
org.apache.derby.shared.common.reference.SQLState's strings in the
replication tests to make them self contained.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@769596 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3420-1b0debd1
|
DERBY-3420: Remove obsolete -ca option from ij.
The -ca command-line-option for the ij command is obsolete and no longer
needed, since the ij connection URL can now accept all the necessary
connection attribute information.
This change removes the implementation of the -ca option from the ij code,
and removes the option from the ij "usage" message, and from the test output.
There is a release note attached to DERBY-3420, noting that the option
has been removed, and that users of this option (hopefully there are none)
should instead pass connection attributes in the URL.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@737948 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/tools/org/apache/derby/impl/tools/ij/Main.java",
"hunks": [
{
"added": [],
"header": "@@ -82,7 +82,6 @@ public class Main {",
"removed": [
"\t\tProperties connAttributeDefaults = null;"
]
},
{
"added": [],
"header": "@@ -96,9 +95,6 @@ public class Main {",
"removed": [
"\t\t// get the default connection attributes",
"\t\tconnAttributeDefaults = util.getConnAttributeArg(args);",
""
]
},
{
"added": [
"\t\tme.go(in, out);"
],
"header": "@@ -178,7 +174,7 @@ public class Main {",
"removed": [
"\t\tme.go(in, out, connAttributeDefaults);"
]
}
]
},
{
"file": "java/tools/org/apache/derby/impl/tools/ij/util.java",
"hunks": [
{
"added": [],
"header": "@@ -142,46 +142,6 @@ public final class util implements java.security.PrivilegedAction {",
"removed": [
"\t/**",
"\t\tij is started with \"-ca[r] file OtherArgs\";",
"\t\tthe file contains connection attibute properties ",
"\t\tto pass to getConnection",
"\t\t<p>",
"\t\tgetConnAttributeArg will look at the args and take out a ",
"\t\t\"-ca[r] <file>\" pair and returning the Properties",
"\t\t<p>",
"",
"\t\t@exception IOException thrown if file not found",
"",
"\t\t@param args\tthe argument list to consider.",
"\t\t@return properties in the file",
"\t */",
"\tstatic public Properties getConnAttributeArg(String[] args) ",
"\t\tthrows IOException ",
"\t{",
"\t\tString n;",
"\t\tInputStream in1;",
"\t\tProperties p = new Properties();",
"",
"\t\tif ((n = getArg(\"-ca\", args))!= null){",
"\t\t\tin1 = new FileInputStream(n);",
"\t\t\tin1 = new BufferedInputStream(in1);",
"\t\t}",
"\t\telse if ((n = getArg(\"-car\", args)) != null) {",
"\t\t\tin1 = getResourceAsStream(n);",
"\t\t\tif (in1 == null) throw ijException.resourceNotFound();",
"\t\t}",
"\t\telse",
"\t\t\treturn null;",
"",
"\t\t// Trim off excess whitespace in property file, if any, and",
"\t\t// then load those properties into 'p'.",
"\t\tutil.loadWithTrimmedValues(in1, p);",
"",
"\t\treturn p;",
"\t}",
"",
""
]
},
{
"added": [],
"header": "@@ -270,8 +230,6 @@ public final class util implements java.security.PrivilegedAction {",
"removed": [
"\t\t\t args[ix].equals(\"-ca\") ||",
"\t\t\t args[ix].equals(\"-car\") ||"
]
}
]
},
{
"file": "java/tools/org/apache/derby/impl/tools/ij/utilMain.java",
"hunks": [
{
"added": [],
"header": "@@ -65,7 +65,6 @@ public class utilMain implements java.security.PrivilegedAction {",
"removed": [
"\tprivate Properties connAttributeDefaults;"
]
},
{
"added": [
"\tpublic void go(LocalizedInput[] in, LocalizedOutput out)",
"\t\t\t\t throws ijFatalException"
],
"header": "@@ -182,13 +181,11 @@ public class utilMain implements java.security.PrivilegedAction {",
"removed": [
"\t * @param connAttributeDefaults connection attributes from -ca ij arg",
"\tpublic void go(LocalizedInput[] in, LocalizedOutput out,",
"\t\t\t\t Properties connAttributeDefaults) throws ijFatalException",
"\t\tthis.connAttributeDefaults = connAttributeDefaults;"
]
}
]
}
] |
derby-DERBY-3421-7ad8ff6b
|
DERBY-3421: Remove unused code for caching of connect bytes.
Patch file: derby-3421-1b-removal.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628679 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/net/NetConnection.java",
"hunks": [
{
"added": [],
"header": "@@ -57,10 +57,6 @@ public class NetConnection extends org.apache.derby.client.am.Connection {",
"removed": [
"",
" // byte[] to save the connect flows for connection reset",
" protected byte[] cachedConnectBytes_ = null;",
" protected boolean wroteConnectFromCache_ = false;"
]
},
{
"added": [
" // NetConfiguration.SECMEC_USRIDPWD",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRIDPWD) {",
" writeAllConnectCommandsChained(NetConfiguration.SECMEC_USRIDPWD,",
" user_,",
" getDeferredResetPassword());",
" }",
" // NetConfiguration.SECMEC_USRIDONL",
" else if (securityMechanism_ == NetConfiguration.SECMEC_USRIDONL) {",
" writeAllConnectCommandsChained(NetConfiguration.SECMEC_USRIDONL,",
" user_,",
" null); //password",
" }",
" // Either NetConfiguration.SECMEC_USRENCPWD,",
" // NetConfiguration.SECMEC_EUSRIDPWD or",
" // NetConfiguration.SECMEC_USRSSBPWD",
" else {",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRSSBPWD)",
" initializeClientSeed();",
" else // SECMEC_USRENCPWD, SECMEC_EUSRIDPWD",
" initializePublicKeyForEncryption();",
"",
" // Set the resetConnectionAtFirstSql_ to false to avoid going in an",
" // infinite loop, since all the flow methods call beginWriteChain which then",
" // calls writeDeferredResetConnection where the check for resetConnectionAtFirstSql_",
" // is done. By setting the resetConnectionAtFirstSql_ to false will avoid calling the",
" // writeDeferredReset method again.",
" resetConnectionAtFirstSql_ = false;",
"",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRSSBPWD)",
" flowSeedExchange(securityMechanism_, sourceSeed_);",
" else // SECMEC_USRENCPWD, SECMEC_EUSRIDPWD",
" flowServerAttributesAndKeyExchange(securityMechanism_, publicKey_);",
"",
" agent_.beginWriteChainOutsideUOW();",
"",
" // Reset the resetConnectionAtFirstSql_ to true since we are done",
" // with the flow method.",
" resetConnectionAtFirstSql_ = true;",
"",
" // NetConfiguration.SECMEC_USRENCPWD",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRENCPWD) {",
" writeSecurityCheckAndAccessRdb(NetConfiguration.SECMEC_USRENCPWD,",
" null, //password",
" null, //encryptedUserid",
" encryptedPasswordForUSRENCPWD(getDeferredResetPassword()));",
" // NetConfiguration.SECMEC_USRSSBPWD",
" else if (securityMechanism_ == NetConfiguration.SECMEC_USRSSBPWD) {",
" writeSecurityCheckAndAccessRdb(NetConfiguration.SECMEC_USRSSBPWD,",
" null,",
" null,",
" passwordSubstituteForUSRSSBPWD(getDeferredResetPassword()));",
" else { // NetConfiguration.SECMEC_EUSRIDPWD",
" writeSecurityCheckAndAccessRdb(NetConfiguration.SECMEC_EUSRIDPWD,",
" null, //user",
" null, //password",
" encryptedUseridForEUSRIDPWD(),",
" encryptedPasswordForEUSRIDPWD(getDeferredResetPassword()));"
],
"header": "@@ -870,108 +866,79 @@ public class NetConnection extends org.apache.derby.client.am.Connection {",
"removed": [
" private void cacheConnectBytes(int beginOffset, int endOffset) {",
" int length = endOffset - beginOffset;",
" cachedConnectBytes_ = new byte[length];",
" netAgent_.netConnectionRequest_.finalizePreviousChainedDss(false);",
" System.arraycopy(netAgent_.netConnectionRequest_.bytes_,",
" beginOffset,",
" cachedConnectBytes_,",
" 0,",
" length);",
" netAgent_.netConnectionRequest_.setDssLengthLocation(netAgent_.netConnectionRequest_.offset_);",
" }",
"",
" if (canUseCachedConnectBytes_ && cachedConnectBytes_ != null &&",
" (securityMechanism_ == NetConfiguration.SECMEC_USRIDPWD ||",
" securityMechanism_ == NetConfiguration.SECMEC_USRIDONL)) {",
" writeDeferredResetFromCache();",
" wroteConnectFromCache_ = true;",
" } else {",
" int beginOffset = netAgent_.netConnectionRequest_.offset_;",
" int endOffset = 0;",
" // NetConfiguration.SECMEC_USRIDPWD",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRIDPWD) {",
" writeAllConnectCommandsChained(NetConfiguration.SECMEC_USRIDPWD,",
" getDeferredResetPassword());",
" endOffset = netAgent_.netConnectionRequest_.offset_;",
" cacheConnectBytes(beginOffset, endOffset);",
" // NetConfiguration.SECMEC_USRIDONL",
" else if (securityMechanism_ == NetConfiguration.SECMEC_USRIDONL) {",
" writeAllConnectCommandsChained(NetConfiguration.SECMEC_USRIDONL,",
" null); //password",
" endOffset = netAgent_.netConnectionRequest_.offset_;",
" cacheConnectBytes(beginOffset, endOffset);",
" // Either NetConfiguration.SECMEC_USRENCPWD,",
" // NetConfiguration.SECMEC_EUSRIDPWD or",
" // NetConfiguration.SECMEC_USRSSBPWD",
" else {",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRSSBPWD)",
" initializeClientSeed();",
" else // SECMEC_USRENCPWD, SECMEC_EUSRIDPWD",
" initializePublicKeyForEncryption();",
"",
" // Set the resetConnectionAtFirstSql_ to false to avoid going in an",
" // infinite loop, since all the flow methods call beginWriteChain which then",
" // calls writeDeferredResetConnection where the check for resetConnectionAtFirstSql_",
" // is done. By setting the resetConnectionAtFirstSql_ to false will avoid calling the",
" // writeDeferredReset method again.",
" resetConnectionAtFirstSql_ = false;",
"",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRSSBPWD)",
" flowSeedExchange(securityMechanism_, sourceSeed_);",
" else // SECMEC_USRENCPWD, SECMEC_EUSRIDPWD",
" flowServerAttributesAndKeyExchange(securityMechanism_, publicKey_);",
"",
" agent_.beginWriteChainOutsideUOW();",
"",
" // Reset the resetConnectionAtFirstSql_ to true since we are done",
" // with the flow method.",
" resetConnectionAtFirstSql_ = true;",
"",
" // NetConfiguration.SECMEC_USRENCPWD",
" if (securityMechanism_ == NetConfiguration.SECMEC_USRENCPWD) {",
" writeSecurityCheckAndAccessRdb(NetConfiguration.SECMEC_USRENCPWD,",
" user_,",
" null, //password",
" null, //encryptedUserid",
" encryptedPasswordForUSRENCPWD(getDeferredResetPassword()));",
" }",
" // NetConfiguration.SECMEC_USRSSBPWD",
" else if (securityMechanism_ == NetConfiguration.SECMEC_USRSSBPWD) {",
" writeSecurityCheckAndAccessRdb(NetConfiguration.SECMEC_USRSSBPWD,",
" user_,",
" null,",
" null,",
" passwordSubstituteForUSRSSBPWD(getDeferredResetPassword()));",
" }",
" else { // NetConfiguration.SECMEC_EUSRIDPWD",
" writeSecurityCheckAndAccessRdb(NetConfiguration.SECMEC_EUSRIDPWD,",
" null, //user",
" null, //password",
" encryptedUseridForEUSRIDPWD(),",
" encryptedPasswordForEUSRIDPWD(getDeferredResetPassword()));",
" }",
" if (wroteConnectFromCache_) {",
" netAgent_.netConnectionReply_.verifyDeferredReset();",
" return;",
" }"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetConnectionReply.java",
"hunks": [
{
"added": [],
"header": "@@ -57,18 +57,6 @@ public class NetConnectionReply extends Reply",
"removed": [
" void verifyDeferredReset() throws SqlException {",
" readDssHeader();",
" verifyConnectReply(CodePoint.EXCSATRD);",
" readDssHeader();",
" verifyConnectReply(CodePoint.ACCSECRD);",
" readDssHeader();",
" verifyConnectReply(CodePoint.SECCHKRM);",
" readDssHeader();",
" verifyConnectReply(CodePoint.ACCRDBRM);",
" agent_.checkForChainBreakingException_();",
" }",
""
]
}
]
}
] |
derby-DERBY-3422-cd903151
|
DERBY-3422: Embedded returns wrong value for DatabaseMetaData.autoCommitFailureClosesAllResultSets()
Return the correct value (true) and update the test so that it is able
to detect differences between returned value and actual behaviour.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629395 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3423-7e4dcd98
|
DERBY-3423: Minor cleanup in DatabaseMetaData implementation classes
- removed providesQueryObjectGenerator()
- removed unused variables and imports
- fixed signatures for getSchemas(String,String) and
getClientInfoProperties() to match the signatures in the interface
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629785 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/DatabaseMetaData.java",
"hunks": [
{
"added": [],
"header": "@@ -23,7 +23,6 @@ package org.apache.derby.client.am;",
"removed": [
"import org.apache.derby.jdbc.ClientDataSource;"
]
},
{
"added": [],
"header": "@@ -2577,19 +2576,6 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {",
"removed": [
" /**",
" * Retrieves whether this JDBC driver provides its own",
" * <code>QueryObjectGenerator</code>.",
" *",
" * @return <code>false</code>, since Derby does not provide its",
" * own generator",
" * @exception SQLException if a database access error occurs",
" */",
" public final boolean providesQueryObjectGenerator() throws SQLException {",
" checkForClosedConnection();",
" return false;",
" }",
""
]
},
{
"added": [
" public java.sql.ResultSet getSchemas(String catalog, String schemaPattern)"
],
"header": "@@ -2610,7 +2596,7 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {",
"removed": [
" public ResultSet getSchemas(String catalog, String schemaPattern)"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetDatabaseMetaData.java",
"hunks": [
{
"added": [],
"header": "@@ -23,13 +23,9 @@ package org.apache.derby.client.net;",
"removed": [
"import org.apache.derby.shared.common.reference.JDBC30Translation;",
"",
" private final NetAgent netAgent_;",
""
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedDatabaseMetaData40.java",
"hunks": [
{
"added": [],
"header": "@@ -21,22 +21,14 @@",
"removed": [
"import java.sql.ResultSet;",
"import java.sql.DatabaseMetaData;",
"import java.sql.PreparedStatement;",
"import org.apache.derby.impl.jdbc.Util;",
"",
" private final String url;",
" ",
" this.url = url;"
]
},
{
"added": [],
"header": "@@ -75,12 +67,7 @@ public class EmbedDatabaseMetaData40 extends EmbedDatabaseMetaData {",
"removed": [
" ",
" public boolean providesQueryObjectGenerator() throws SQLException {",
" return false;",
" }",
" "
]
}
]
}
] |
derby-DERBY-3424-57655605
|
DERBY-3424 (partial) Improve JMXManagementService to spearate registering of mbeans with Derby's management service from registering with the JMX service. This allows the ManagementMBEan to correctly implement the stop and start management service to unregister (from the platform mbean server) Derby's MBean when the stopManagement() is called and then re-register them when startManagement() is called.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628585 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/jmx/JMXManagementService.java",
"hunks": [
{
"added": [
"import java.util.HashMap;",
"import java.util.Map;"
],
"header": "@@ -22,12 +22,11 @@ import java.security.AccessController;",
"removed": [
"import java.util.Collections;",
"import java.util.Set;",
"import javax.management.InstanceNotFoundException;"
]
},
{
"added": [
" * ",
" * An mbean registered with this service remains until it is unregistered.",
" * While registered with this service it may be registered and unregistered",
" * with the jmx service a number of times.",
"public final class JMXManagementService implements ManagementService, ModuleControl {",
" * Platform MBean server, from ManagementFactory.getPlatformMBeanServer().",
" * If not null then this service has registered mbeans with the",
" * plaform MBean server.",
" * If null then this service either has no mbeans registered",
" * or one mbean registered (representing itself).",
" * The set of mbeans registered to this service by",
" * Derby's code. These beans are registered with",
" * the platform mbean server if mbeanServer is not null.",
" private Map<ObjectName,StandardMBean> registeredMbeans;",
" ",
" /**",
" * If this object is registered as a management mbean",
" * then myManagementBean represents its name. This will",
" * be non-null when derby.system.jmx is true.",
" */",
" private ObjectName myManagementBean;",
" ",
" private MBeanServer myManagementServer;"
],
"header": "@@ -40,27 +39,45 @@ import org.apache.derby.iapi.services.jmx.ManagementService;",
"removed": [
"import org.apache.derby.mbeans.Management;",
"public class JMXManagementService implements ManagementService, ModuleControl {",
" * Platfrom MBean server, from ManagementFactory.getPlatformMBeanServer().",
" * The set of mbeans registered by this service.",
" private Set<ObjectName> registeredMbeans;"
]
},
{
"added": [
" ",
" registeredMbeans = new HashMap<ObjectName,StandardMBean>();",
" ",
" if (PropertyUtil.getSystemBoolean(Property.JMX)) {",
" findServer();",
" ",
" myManagementBean = (ObjectName) registerMBean(this,",
" ManagementMBean.class,",
" \"type=Management\");",
" myManagementServer = mbeanServer;",
" }",
" ",
" registerMBean(new Version(Monitor.getMonitor().getEngineVersion()),",
" VersionMBean.class,",
" \"type=Version,jar=derby.jar\");",
" ",
" ",
" // If we are currently not registering any mbeans",
" // then we might still have this registered as",
" // a management mbean. Need to explicitly remove this",
" // using the mbean server that created it, which",
" // possibly could not be the same as the current server.",
" if (mbeanServer == null && myManagementBean != null)",
" {",
" mbeanServer = myManagementServer;",
" unregisterMBean(myManagementBean);",
" mbeanServer = null;",
" }",
" for (ObjectName mbeanName :",
" new HashSet<ObjectName>(registeredMbeans.keySet()))",
" ",
" // registeredMbeans == null indicates service is not active",
" registeredMbeans = null;",
" ",
" myManagementServer = null;"
],
"header": "@@ -77,22 +94,50 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
"",
" if (PropertyUtil.getSystemBoolean(Property.JMX))",
" initialize();",
"",
" if (mbeanServer == null)",
" return;",
" for (ObjectName mbeanName : new HashSet<ObjectName>(registeredMbeans))"
]
},
{
"added": [
" private synchronized void findServer() {"
],
"header": "@@ -103,9 +148,8 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
" private synchronized void initialize() throws StandardException {",
" registeredMbeans = new HashSet<ObjectName>();"
]
},
{
"added": [
" // TODO: just ignoring inability to create or",
" // find the mbean server."
],
"header": "@@ -113,17 +157,10 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
"",
" registerMBean(new Version(Monitor.getMonitor().getEngineVersion()),",
" VersionMBean.class,",
" \"type=Version,jar=derby.jar\");",
" ",
" registerMBean(this,",
" ManagementMBean.class,",
" \"type=Management\");",
" // TODO: just ignoring inability to create the mbean server."
]
},
{
"added": [
" ",
" registeredMbeans.put(beanName, standardMBean);",
" if (mbeanServer != null)",
" jmxRegister(standardMBean, beanName);",
" ",
" return beanName;",
" ",
" } catch (JMException jme) {",
" throw StandardException.plainWrapException(jme);",
" }",
" }",
" ",
" /**",
" * Register an mbean with the platform mbean server.",
" */",
" private void jmxRegister(final StandardMBean standardMBean,",
" final ObjectName beanName) throws JMException",
" {",
" try {",
" AccessController",
" .doPrivileged(new PrivilegedExceptionAction<Object>() {",
" public Object run() throws JMException {",
" mbeanServer.registerMBean(standardMBean, beanName);",
" return null;",
" }",
" });",
" } catch (PrivilegedActionException pae) {",
" throw (JMException) pae.getException();",
" * Unregister an mbean using an object previous returned from registerMBean."
],
"header": "@@ -148,40 +185,48 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
" if (mbeanServer == null)",
" return null;",
"",
" try {",
" AccessController",
" .doPrivileged(new PrivilegedExceptionAction<Object>() {",
" public Object run() throws JMException {",
" mbeanServer.registerMBean(standardMBean, beanName);",
" return null;",
" }",
" });",
" ",
" registeredMbeans.add(beanName);",
" return beanName;",
" } catch (PrivilegedActionException pae) {",
" throw (JMException) pae.getException();",
" }",
" } catch (JMException jme) {",
" throw StandardException.plainWrapException(jme);",
" * Unregister an mbean using an object previous returned",
" * from registerMBean."
]
},
{
"added": [
" * Unregisters an mbean from this service and JMX plaform server",
" if (registeredMbeans.remove(mbeanName) == null)",
" return;",
" ",
" if (mbeanServer == null)",
" ",
" jmxUnregister(mbeanName);",
" }",
" ",
" /**",
" * Unregister an mbean from the JMX plaform server",
" * but leave it registered to this service. This",
" * is so that if jmx is reenabled we can reestablish",
" * all vaid mbeans (that are still registered with this service).",
" * @param mbeanName",
" */",
" private void jmxUnregister(final ObjectName mbeanName) {"
],
"header": "@@ -191,13 +236,28 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
" * Unregisters an mbean that was registered by this service.",
" if (!registeredMbeans.remove(mbeanName))"
]
},
{
"added": [
" ",
" //Has this service been shut down?",
" if (registeredMbeans == null)",
" return;",
" ",
" // Already active?",
" if (isManagementActive())",
" return;",
" ",
" findServer();",
" ",
" // If we can't find the server then we can't register.",
" if (mbeanServer == null)",
" return;",
" ",
" for (ObjectName mbeanName : registeredMbeans.keySet())",
" {",
" // If we registered this as a management bean",
" // then leave it registered to allow the mbeans",
" // to be re-registered with JMX",
" if (mbeanName.equals(myManagementBean))",
" continue;",
" ",
" try {",
" jmxRegister(registeredMbeans.get(mbeanName), mbeanName);",
" } catch (JMException e) {",
" // TODO - what to do here?",
" }",
" }",
" ",
" // Has this service been shut down?",
" if (registeredMbeans == null)",
" return;",
" ",
" if (isManagementActive()) {",
" for (ObjectName mbeanName : registeredMbeans.keySet())",
" {",
" // If we registered this as a management bean",
" // then leave it registered to allow the mbeans",
" // to be re-registered with JMX",
" if (mbeanName.equals(myManagementBean))",
" continue;",
" jmxUnregister(mbeanName);",
" }",
" mbeanServer = null;",
" }"
],
"header": "@@ -228,10 +288,54 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
" // TODO:",
" // TODO:"
]
}
]
}
] |
derby-DERBY-3424-b69d62b9
|
DERBY-3424 Change getSystemModule() to return null if the monitor is not booted, rather than a NullPointerException
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@632398 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/monitor/Monitor.java",
"hunks": [
{
"added": [
" /**",
" * Return a system module. If it cannot be found or the monitor is",
" * not running then null is returned.",
" */",
" public static Object getSystemModule(String factoryInterface)",
" {",
" ModuleFactory monitor = getMonitor();",
" if (monitor == null)",
" return null;",
" ",
"\t\tObject module = monitor.findModule((Object) null,"
],
"header": "@@ -387,9 +387,17 @@ public class Monitor {",
"removed": [
"\tpublic static Object getSystemModule(String factoryInterface)",
"\t{",
"\t\tObject module = getMonitor().findModule((Object) null,"
]
}
]
}
] |
derby-DERBY-3424-e6a963f7
|
DERBY-3424 DERBY-3385 Add test for the ManagementMBean when Derby is not running.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@633643 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/services/jmx/JMXManagementService.java",
"hunks": [
{
"added": [
" systemIdentifier = null;"
],
"header": "@@ -150,6 +150,7 @@ public final class JMXManagementService implements ManagementService, ModuleCont",
"removed": []
}
]
}
] |
derby-DERBY-3424-f8bc0199
|
DERBY-3424 (partial) Add initial apis and bean implementations for a management
mbean to control Derby's JMX behaviour. Start and stop the management are
present as operations but currently do nothing. Also fix a bug in DERBY-1387
where shutting down Derby would fail with jmx when multiple mbeans were registered.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628142 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/jmx/ManagementService.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.reference.Module;",
"import org.apache.derby.mbeans.ManagementMBean;"
],
"header": "@@ -18,6 +18,8 @@",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/jmx/JMXManagementService.java",
"hunks": [
{
"added": [
"import java.util.Collections;"
],
"header": "@@ -22,6 +22,7 @@ import java.security.AccessController;",
"removed": []
},
{
"added": [
"import org.apache.derby.mbeans.Management;",
"import org.apache.derby.mbeans.ManagementMBean;"
],
"header": "@@ -39,6 +40,8 @@ import org.apache.derby.iapi.services.jmx.ManagementService;",
"removed": []
},
{
"added": [
" "
],
"header": "@@ -53,7 +56,7 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
" "
]
},
{
"added": [
"",
" // Need a copy of registeredMbeans since unregisterMBean will remove",
" // items from registeredMbeans and thus invalidate any iterator",
" // on it directly.",
" for (ObjectName mbeanName : new HashSet<ObjectName>(registeredMbeans))",
" ",
" mbeanServer = null;"
],
"header": "@@ -82,9 +85,14 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": [
" ",
" for (ObjectName mbeanName : registeredMbeans)"
]
},
{
"added": [
" registerMBean(this,",
" ManagementMBean.class,",
" \"type=Management\");",
" "
],
"header": "@@ -110,6 +118,10 @@ public class JMXManagementService implements ManagementService, ModuleControl {",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/services/monitor/BaseMonitor.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.reference.Module;"
],
"header": "@@ -44,6 +44,7 @@ import org.apache.derby.iapi.services.sanity.SanityManager;",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/jdbc/InternalDriver.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.reference.Module;"
],
"header": "@@ -23,6 +23,7 @@",
"removed": []
},
{
"added": [
" Monitor.getSystemModule(Module.JMX)).registerMBean("
],
"header": "@@ -92,7 +93,7 @@ public abstract class InternalDriver implements ModuleControl {",
"removed": [
" Monitor.getSystemModule(ManagementService.MODULE)).registerMBean("
]
}
]
},
{
"file": "java/engine/org/apache/derby/mbeans/ManagementMBean.java",
"hunks": [
{
"added": [
"/*",
"",
" Derby - Class org.apache.derby.mbeans.ManagementMBean",
"",
" Licensed to the Apache Software Foundation (ASF) under one or more",
" contributor license agreements. See the NOTICE file distributed with",
" this work for additional information regarding copyright ownership.",
" The ASF licenses this file to you under the Apache License, Version 2.0",
" (the \"License\"); you may not use this file except in compliance with",
" the License. You may obtain a copy of the License at",
"",
" http://www.apache.org/licenses/LICENSE-2.0",
"",
" Unless required by applicable law or agreed to in writing, software",
" distributed under the License is distributed on an \"AS IS\" BASIS,",
" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
" See the License for the specific language governing permissions and",
" limitations under the License.",
"",
"*/",
"",
"package org.apache.derby.mbeans;",
"",
"/**",
" * JMX MBean inteface to control visibility of Derby's MBeans.",
" */",
"public interface ManagementMBean {",
" ",
" /**",
" * Is Derby's JMX management active. If active then Derby",
" * has registered MBeans relevant to its current state.",
" * @return true Derby has registered beans, false Derby has not",
" * registered any beans.",
" */",
" public boolean isManagementActive();",
" ",
" /**",
" * Inform Derby to start its JMX management by registering",
" * MBeans relevant to its current state. If Derby is not",
" * booted then no action is taken.",
" */",
" public void startManagement();",
" ",
" /**",
" * Inform Derby to stop its JMX management by unregistering",
" * its MBeans. If Derby is not booted then no action is taken.",
" */",
" public void stopManagement();",
"}"
],
"header": "@@ -0,0 +1,49 @@",
"removed": []
}
]
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.