| id | commit_message | diffs |
|---|---|---|
derby-DERBY-3151-04353ef8
|
DERBY-3151: Reduce dependency on NetworkServerControl in TestConfiguration.
Added a NetworkServerControl wrapper that uses reflection to load the class and invoke its methods.
The purpose of the change is to be able to run JUnit tests without having derbynet.jar (and derbyrun.jar) on the classpath.
Patch file: derby-3151-4b-wrapper_with_reflection.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@669808 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java",
"hunks": [
{
"added": [],
"header": "@@ -31,8 +31,6 @@ import java.util.Properties;",
"removed": [
"import org.apache.derby.drda.NetworkServerControl;",
""
]
},
{
"added": [
" NetworkServerControlWrapper networkServer =",
" new NetworkServerControlWrapper();"
],
"header": "@@ -1387,8 +1385,8 @@ public class TestConfiguration {",
"removed": [
" NetworkServerControl networkServer =",
" NetworkServerTestSetup.getNetworkServerControl();"
]
},
{
"added": [
" NetworkServerControlWrapper networkServer =",
" new NetworkServerControlWrapper();",
""
],
"header": "@@ -1409,9 +1407,9 @@ public class TestConfiguration {",
"removed": [
" NetworkServerControl networkServer =",
" NetworkServerTestSetup.getNetworkServerControl();",
"\t "
]
}
]
}
] |
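A note on the technique: the wrapper described above lets the test framework compile and run with no static dependency on derbynet.jar, because org.apache.derby.drda.NetworkServerControl is only looked up by name at run time. Below is a minimal, hypothetical sketch of such a wrapper; the class name in the patch is NetworkServerControlWrapper, but the body here is illustrative, not Derby's code.

```java
import java.lang.reflect.Method;

// Hypothetical sketch: forward calls to NetworkServerControl without a
// compile-time dependency on derbynet.jar.
class NetworkServerControlSketch {
    private final Object control;      // reflectively created instance
    private final Method pingMethod;

    NetworkServerControlSketch() throws Exception {
        // Resolving the class by name fails only when the wrapper is
        // actually instantiated, not when the test framework loads.
        Class<?> clazz =
            Class.forName("org.apache.derby.drda.NetworkServerControl");
        control = clazz.getConstructor().newInstance();
        pingMethod = clazz.getMethod("ping");
    }

    /** Ping the server by invoking the wrapped method reflectively. */
    void ping() throws Exception {
        pingMethod.invoke(control);
    }
}
```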
derby-DERBY-3155-03170a55
|
DERBY-3155: Add a test for MERGE statements which sometimes put nulls into generated columns and sometimes don't; commit derby-3155-41-aa-nullGeneratedColumns.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1576710 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-0ae3b6de
|
DERBY-3155: Simplify processing of "then" rows for the DELETE actions of MERGE statements; commit derby-3155-50-aa-revampDeleteThenRows.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1579937 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [],
"header": "@@ -87,7 +87,6 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" private int[] _deleteColumnOffsets;"
]
},
{
"added": [],
"header": "@@ -775,33 +774,6 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" /**",
" * <p>",
" * Calculate the 1-based offsets which define the \"then\" rows which will be buffered up",
" * for a DELETE action at run-time. The rows are constructed",
" * from the columns in the SELECT list of the driving left joins. This method",
" * calculates an array of offsets into the SELECT list. The columns at those",
" * offsets will form the row which is buffered up for the DELETE",
" * action.",
" * </p>",
" */",
" private void bindDeleteThenColumns( ResultColumnList selectList )",
" throws StandardException",
" {",
" int thenCount = _thenColumns.size();",
" int selectCount = selectList.size();",
" ",
" _deleteColumnOffsets = new int[ thenCount ];",
"",
" for ( int bidx = 0; bidx < thenCount; bidx++ )",
" {",
" ResultColumn thenRC = _thenColumns.elementAt( bidx );",
" ValueNode thenExpression = thenRC.getExpression();",
"",
" _deleteColumnOffsets[ bidx ] = getSelectListOffset( selectList, thenExpression );",
" }",
" }",
""
]
},
{
"added": [],
"header": "@@ -1096,100 +1068,6 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" ////////////////////////",
" //",
" // BIND THE THEN ROW",
" //",
" ////////////////////////",
"",
" /**",
" * <p>",
" * Bind the row which will go into the temporary table at run-time.",
" * </p>",
" */",
" void bindThenColumns( ResultColumnList selectList )",
" throws StandardException",
" {",
" if ( isDeleteClause() ) { bindDeleteThenColumns( selectList ); }",
" }",
"",
" /**",
" * <p>",
" * Find a column reference in the SELECT list of the driving left join",
" * and return its 1-based offset into that list. Returns -1 if the column",
" * can't be found.",
" * </p>",
" */",
" private int getSelectListOffset( ResultColumnList selectList, ValueNode thenExpression )",
" throws StandardException",
" {",
" int selectCount = selectList.size();",
"",
" if ( thenExpression instanceof ColumnReference )",
" {",
" ColumnReference thenCR = (ColumnReference) thenExpression;",
" String thenCRName = thenCR.getColumnName();",
" int thenCRMergeTableID = getMergeTableID( thenCR );",
"",
" // loop through the SELECT list to find this column reference",
" for ( int sidx = 0; sidx < selectCount; sidx++ )",
" {",
" ResultColumn selectRC = selectList.elementAt( sidx );",
" ValueNode selectExpression = selectRC.getExpression();",
" ColumnReference selectCR = selectExpression instanceof ColumnReference ?",
" (ColumnReference) selectExpression : null;",
"",
" if ( selectCR != null )",
" {",
" if (",
" ( getMergeTableID( selectCR ) == thenCRMergeTableID) &&",
" thenCRName.equals( selectCR.getColumnName() )",
" )",
" {",
" return sidx + 1;",
" }",
" }",
" }",
" ",
" if (SanityManager.DEBUG)",
" {",
" SanityManager.THROWASSERT",
" (",
" \"Can't find select list column corresponding to \" + thenCR.getSQLColumnName() +",
" \" with merge table id = \" + thenCRMergeTableID",
" );",
" }",
" }",
" else if ( thenExpression instanceof CurrentRowLocationNode )",
" {",
" //",
" // There is only one RowLocation in the SELECT list, the row location for the",
" // tuple from the target table. The RowLocation is always the last column in",
" // the SELECT list.",
" //",
" return selectCount;",
" }",
"",
" return -1;",
" }",
"",
" /** Find the MERGE table id of the indicated column */",
" private int getMergeTableID( ColumnReference cr )",
" {",
" int mergeTableID = cr.getMergeTableID();",
"",
" if (SanityManager.DEBUG)",
" {",
" SanityManager.ASSERT",
" (",
" ( (mergeTableID == ColumnReference.MERGE_SOURCE) || (mergeTableID == ColumnReference.MERGE_TARGET) ),",
" \"Column \" + cr.getSQLColumnName() + \" has illegal MERGE table id: \" + mergeTableID",
" );",
" }",
"",
" return mergeTableID;",
" }",
""
]
},
{
"added": [],
"header": "@@ -1307,7 +1185,6 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" _deleteColumnOffsets,"
]
},
{
"added": [
" generateInsertUpdateRow( acb, selectList, generatedScan, hojn );"
],
"header": "@@ -1353,7 +1230,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" if ( isInsertClause() || isUpdateClause() ) { generateInsertUpdateRow( acb, selectList, generatedScan, hojn ); }"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/GenericConstantActionFactory.java",
"hunks": [
{
"added": [],
"header": "@@ -1155,7 +1155,6 @@ public class GenericConstantActionFactory",
"removed": [
" int[] thenColumns,"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MatchingClauseConstantAction.java",
"hunks": [
{
"added": [],
"header": "@@ -74,7 +74,6 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" private int[] _deleteColumnOffsets;"
]
},
{
"added": [
" * @param rowMakingMethodName Name of the method which populates the \"then\" row with expressions from the driving left join."
],
"header": "@@ -100,7 +99,7 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" * @param thenColumns Column positions (1-based) from the driving left join which are needed to execute the THEN clause."
]
},
{
"added": [],
"header": "@@ -111,7 +110,6 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" int[] thenColumns,"
]
},
{
"added": [],
"header": "@@ -121,7 +119,6 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" _deleteColumnOffsets = ArrayUtil.copy( thenColumns );"
]
},
{
"added": [
" ExecRow thenRow = bufferThenRow( activation );"
],
"header": "@@ -248,18 +245,7 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" ExecRow thenRow;",
"",
" switch( _clauseType )",
" {",
" case WHEN_MATCHED_THEN_DELETE:",
" thenRow = bufferThenRowForDelete( activation, selectRow );",
" break;",
"",
" default:",
" thenRow = bufferThenRow( activation );",
" break;",
" }"
]
},
{
"added": [
" // CONSTRUCT ROWS TO PUT INTO THE TEMPORARY TABLE"
],
"header": "@@ -285,35 +271,10 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" // MINIONS",
" /**",
" * <p>",
" * Construct and buffer a row for the DELETE",
" * action corresponding to this MATCHED clause. The buffered row",
" * is built from columns in the passed-in row. The passed-in row is the SELECT list",
" * of the MERGE statement's driving left join.",
" * </p>",
" */",
" private ExecRow bufferThenRowForDelete",
" (",
" Activation activation,",
" ExecRow selectRow",
" )",
" throws StandardException",
" {",
" int thenRowLength = _thenColumnSignature.getColumnCount();",
" ValueRow thenRow = new ValueRow( thenRowLength );",
" for ( int i = 0; i < thenRowLength; i++ )",
" {",
" thenRow.setColumn( i + 1, selectRow.getColumn( _deleteColumnOffsets[ i ] ) );",
" }",
"",
" return thenRow;",
" }",
""
]
},
{
"added": [],
"header": "@@ -370,7 +331,6 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" _deleteColumnOffsets = ArrayUtil.readIntArray( in );"
]
},
{
"added": [],
"header": "@@ -392,7 +352,6 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" ArrayUtil.writeIntArray( out, _deleteColumnOffsets );"
]
}
]
}
] |
derby-DERBY-3155-1484b835
|
DERBY-3155: Forbid subqueries in the WHEN [ NOT ] MATCHED clauses of MERGE statements; commit derby-3155-19-aa-forbidSubqueriesInMatchedClauses.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1561671 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
" //",
" // Although the SQL Standard allows subqueries in the WHEN [ NOT ] MATCHED clauses,",
" // we don't support them yet. That is because code-generation for those clauses breaks",
" // if they contain subqueries. That, in turn, is because we don't completely optimize those",
" // clauses. If we improve Derby so that we do completely optimize the WHEN [ NOT ] MATCHED clauses,",
" // then we can consider enabling subqueries in them.",
" //",
" forbidSubqueries();",
""
],
"header": "@@ -248,6 +248,15 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
}
]
}
] |
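The patch adds only the explanatory comment and the forbidSubqueries() call; the check itself is not shown in the hunk. Here is a self-contained sketch of its likely shape (toy node types, not Derby's QueryTreeNode hierarchy), collecting subquery nodes in the spirit of the CollectNodesVisitor calls visible in the other diffs and failing if any turn up.

```java
import java.util.ArrayList;
import java.util.List;

// Toy AST sketch of a "forbid subqueries" pass: gather every node of a
// given type under an expression, then reject the statement if any
// subquery node was found.
class ForbidSubqueriesSketch {
    interface Node {
        List<Node> children();
    }

    static class SubqueryNode implements Node {
        public List<Node> children() { return List.of(); }
    }

    static List<Node> collect(Node root, Class<? extends Node> wanted) {
        List<Node> hits = new ArrayList<>();
        if (wanted.isInstance(root)) { hits.add(root); }
        for (Node child : root.children()) {
            hits.addAll(collect(child, wanted));
        }
        return hits;
    }

    static void forbidSubqueries(Node expression) {
        if (!collect(expression, SubqueryNode.class).isEmpty()) {
            throw new IllegalStateException(
                "subqueries are not yet supported in WHEN [ NOT ] MATCHED clauses");
        }
    }
}
```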
derby-DERBY-3155-1705596a
|
DERBY-3155: Propagate RowLocations out of index probes as necessary for the driving left join of a MERGE statement; commit derby-3155-26-aa-copyRowLocationForIndexScans.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1569396 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/execute/ResultSetFactory.java",
"hunks": [
{
"added": [
"\t\t@param baseColumnCount\t\tNumber of columns in the base table"
],
"header": "@@ -1086,6 +1086,7 @@ public interface ResultSetFactory {",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/GenericResultSetFactory.java",
"hunks": [
{
"added": [
"\t\t\t\t\t\t\t\tdouble optimizerEstimatedCost,",
"\t\t\t\t\t\t\t\tint baseColumnCount )"
],
"header": "@@ -829,7 +829,8 @@ public class GenericResultSetFactory implements ResultSetFactory",
"removed": [
"\t\t\t\t\t\t\t\tdouble optimizerEstimatedCost)"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/IndexRowToBaseRowResultSet.java",
"hunks": [
{
"added": [
" //",
" // For managing row locations which are returned for the left join",
" // which drives the execution of the MERGE statement.",
" //",
" private int _baseColumnCount;",
" private boolean _includeRowLocation;",
"\tprivate FormatableBitSet _heapColsWithoutRowLocation;",
""
],
"header": "@@ -79,6 +79,14 @@ class IndexRowToBaseRowResultSet extends NoPutResultSetImpl",
"removed": []
},
{
"added": [
"\t\t\t\t\tdouble optimizerEstimatedCost,",
"\t\t\t\t\tint baseColumnCount) "
],
"header": "@@ -97,7 +105,8 @@ class IndexRowToBaseRowResultSet extends NoPutResultSetImpl",
"removed": [
"\t\t\t\t\tdouble optimizerEstimatedCost) "
]
},
{
"added": [
" _baseColumnCount = baseColumnCount;"
],
"header": "@@ -111,6 +120,7 @@ class IndexRowToBaseRowResultSet extends NoPutResultSetImpl",
"removed": []
},
{
"added": [
"",
" //",
" // The row location column may be needed by the left join which drives",
" // the execution of the MERGE statement.",
" //",
" _includeRowLocation = (_baseColumnCount < accessedHeapCols.getLength());",
" if ( _includeRowLocation )",
" {",
" _heapColsWithoutRowLocation = (FormatableBitSet) accessedHeapCols.clone();",
" _heapColsWithoutRowLocation.clear( accessedHeapCols.getLength() - 1 );",
" }",
""
],
"header": "@@ -172,6 +182,18 @@ class IndexRowToBaseRowResultSet extends NoPutResultSetImpl",
"removed": []
},
{
"added": [
" baseCC.fetch",
" (",
" baseRowLocation,",
" rowArray,",
" _includeRowLocation ? _heapColsWithoutRowLocation : accessedHeapCols",
" );"
],
"header": "@@ -338,8 +360,12 @@ class IndexRowToBaseRowResultSet extends NoPutResultSetImpl",
"removed": [
" baseCC.fetch(",
" baseRowLocation, rowArray, accessedHeapCols);"
]
}
]
}
] |
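The core of this patch is the bookkeeping added to IndexRowToBaseRowResultSet: when the accessed-heap-columns bit set is one position wider than the base table, the trailing position is the RowLocation column, and the heap fetch must use a copy of the bit set with that bit cleared. Here is a self-contained sketch of that logic, using java.util.BitSet in place of Derby's FormatableBitSet (which carries its own width, here passed explicitly).

```java
import java.util.BitSet;

class RowLocationBitsSketch {
    /**
     * Mirrors the diff: if baseColumnCount < bitSetWidth, the last bit
     * marks the RowLocation column, so the heap fetch gets a clone with
     * that bit cleared; otherwise it gets the original bit set.
     */
    static BitSet heapFetchColumns(BitSet accessedHeapCols,
                                   int bitSetWidth,    // FormatableBitSet.getLength()
                                   int baseColumnCount) {
        boolean includeRowLocation = baseColumnCount < bitSetWidth;
        if (!includeRowLocation) {
            return accessedHeapCols;
        }
        BitSet withoutRowLocation = (BitSet) accessedHeapCols.clone();
        withoutRowLocation.clear(bitSetWidth - 1);
        return withoutRowLocation;
    }
}
```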
derby-DERBY-3155-18792e22
|
DERBY-3155: Add privilege checks for the INSERT actions of MERGE statements; tests passed cleanly on derby-3155-33-ab-insertPrivs.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1574131 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
"",
" bindExpressions( selectList, fullFromList );"
],
"header": "@@ -809,7 +809,8 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" bindExpressions( selectList, fullFromList );"
]
},
{
"added": [
" //",
" // Don't add USAGE privilege on user-defined types just because we're",
" // building the THEN columns.",
" //",
" boolean wasSkippingTypePrivileges = getCompilerContext().skipTypePrivileges( true );"
],
"header": "@@ -943,6 +944,11 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" getCompilerContext().skipTypePrivileges( wasSkippingTypePrivileges );"
],
"header": "@@ -1043,6 +1049,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
"",
" boolean wasSkippingTypePrivileges = cc.skipTypePrivileges( true );",
" cc.setReliability( previousReliability | CompilerContext.SQL_IN_ROUTINES_ILLEGAL );"
],
"header": "@@ -1081,10 +1088,11 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" cc.setReliability( previousReliability | CompilerContext.SQL_IN_ROUTINES_ILLEGAL );",
" "
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [],
"header": "@@ -598,9 +598,6 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" // now add USAGE priv on referenced types",
" addUDTUsagePriv( getValueNodes( _searchCondition ) );",
""
]
},
{
"added": [
"",
" // add USAGE privilege on CASTs to user-defined types",
" for ( CastNode value : getCastNodes( _searchCondition ) )",
" {",
" addUDTUsagePriv( value );",
" }"
],
"header": "@@ -612,6 +609,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" /** Get a list of CastNodes in an expression */",
" private List<CastNode> getCastNodes( QueryTreeNode expression )",
" CollectNodesVisitor<CastNode> getCNs =",
" new CollectNodesVisitor<CastNode>(CastNode.class);",
" expression.accept(getCNs);",
" return getCNs.getList();"
],
"header": "@@ -709,16 +712,16 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" /** Get a list of ValueNodes in an expression */",
" private List<ValueNode> getValueNodes( QueryTreeNode expression )",
" CollectNodesVisitor<ValueNode> getVNs =",
" new CollectNodesVisitor<ValueNode>(ValueNode.class);",
" expression.accept(getVNs);",
" return getVNs.getList();"
]
}
]
}
] |
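Worth noting in this patch is the save-and-restore idiom around the compiler context's skip-type-privileges flag: skipTypePrivileges(true) returns the previous value, which is reinstated afterwards so that callers who were already skipping are unaffected. Below is a self-contained sketch of the idiom using a stand-in context, not Derby's CompilerContext; the patch restores the flag directly, while the try/finally here is a defensive variant.

```java
// Stand-in compiler context with a single boolean flag, showing the
// save/restore pattern used around privilege collection.
class PrivilegeContextSketch {
    private boolean skipTypePrivileges;

    /** Set the flag and return its previous value. */
    boolean skipTypePrivileges(boolean skip) {
        boolean old = skipTypePrivileges;
        skipTypePrivileges = skip;
        return old;
    }

    void bindThenColumnsSketch() {
        boolean wasSkipping = skipTypePrivileges(true);
        try {
            // ... bind expressions without adding USAGE privileges ...
        } finally {
            skipTypePrivileges(wasSkipping);  // tolerate nested skipping
        }
    }
}
```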
derby-DERBY-3155-1a3e2c0c
|
DERBY-3155: Run all MERGE tests with and without collations; commit derby-3155-47-aa-collations.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1579040 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-1a5b33ed
|
DERBY-3155: Add support for ? parameters to MERGE statements; commit derby-3155-24-aa-supportParameters.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1566673 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-1cd5dd1c
|
DERBY-3155: Improve support for correlation names for the source tables of MERGE statements; tests passed cleanly for me on derby-3155-10-aa-correlationNames.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1548298 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
" FromList fromList = fullFromList;",
"",
" //",
" // For an INSERT action, the WHEN NOT MATCHED refinement can only",
" // mention columns in the source table.",
" //",
" if ( isInsertClause() )",
" {",
" fromList = new FromList( getOptimizerFactory().doJoinOrderOptimization(), getContextManager() );",
" fromList.addElement( fullFromList.elementAt( MergeNode.SOURCE_TABLE_INDEX ) );",
" }",
"",
" mergeNode.bindExpression( _matchingRefinement, fromList );"
],
"header": "@@ -229,7 +229,19 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" mergeNode.bindExpression( _matchingRefinement, fullFromList );"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
" public static final int SOURCE_TABLE_INDEX = 0;",
" public static final int TARGET_TABLE_INDEX = 1;",
"\tprivate static final String TARGET_ROW_LOCATION_NAME = \"###TargetRowLocation\";"
],
"header": "@@ -130,10 +130,10 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" private static final int SOURCE_TABLE_INDEX = 0;",
" private static final int TARGET_TABLE_INDEX = 1;",
"\tprivate static final String TARGET_ROW_LOCATION_NAME = \"###TargetRowLocation\";"
]
},
{
"added": [
" // Replace all references to correlation names with the actual",
" ",
" replaceCorrelationName( _targetTable.correlationName, _targetTable.tableName );",
" _targetTable.correlationName = null;",
" if ( _sourceTable instanceof FromBaseTable )",
" {",
" TableName sourceTableName = ((FromBaseTable) _sourceTable).tableName;",
" replaceCorrelationName( _sourceTable.correlationName, sourceTableName );",
" _sourceTable.correlationName = null;",
" //",
" // Bind the WHEN [ NOT ] MATCHED clauses.",
" //"
],
"header": "@@ -214,38 +214,27 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" // Replace all references to a target correlation name with the actual",
" if ( _targetTable.correlationName != null )",
" {",
" TableName targetTableName = _targetTable.tableName;",
" String correlationName = _targetTable.correlationName;",
" ",
" replaceCorrelationName",
" (",
" correlationName,",
" targetTableName,",
" _searchCondition",
" );",
" ",
" for ( MatchingClauseNode mcn : _matchingClauses )",
" {",
" mcn.replaceCorrelationName",
" (",
" this,",
" correlationName,",
" targetTableName",
" );",
" }",
" _targetTable.correlationName = null;"
]
}
]
}
] |
derby-DERBY-3155-273d23fd
|
DERBY-3155: Add tests for largish lobs in MERGE statements; commit derby-3155-40-aa-bigLobs.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1576383 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-2945dbb8
|
DERBY-3155: Allow system and temp tables in MERGE statements; commit derby-3155-13-aa-allowSystemAndTempTables.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1558912 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
" switch( desc.getTableType() )",
" {",
" case TableDescriptor.BASE_TABLE_TYPE:",
" case TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE:",
" return true;",
"",
" default:",
" return false;",
" }"
],
"header": "@@ -520,7 +520,15 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" return ( desc.getTableType() == TableDescriptor.BASE_TABLE_TYPE );"
]
}
]
}
] |
derby-DERBY-3155-2d835fe5
|
DERBY-3155: Add test for MERGE statement which fires triggers involving generated columns; commit derby-3155-42-aa-triggersAndGeneratedColumns.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1576893 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-2f8e6fbd
|
DERBY-3155: Revamp how columns referenced by WHEN [ NOT ] MATCHED clauses are linked to the columns coming back from the driving left join of a MERGE statement; commit derby-3155-20-aa-reworkColumnMatching.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1564874 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/ColumnReference.java",
"hunks": [
{
"added": [
" // For associating columns with the SOURCE and TARGET tables of MERGE statements.",
" public static final int MERGE_UNKNOWN = 0;",
" public static final int MERGE_SOURCE = MERGE_UNKNOWN + 1;",
" public static final int MERGE_TARGET = MERGE_SOURCE + 1;",
" "
],
"header": "@@ -42,6 +42,11 @@ import org.apache.derby.iapi.util.JBitSet;",
"removed": []
},
{
"added": [
" /** Columns mentioned by MERGE statements need to be associated",
" * the SOURCE or TARGET table */",
" private int _mergeTableID = MERGE_UNKNOWN;",
""
],
"header": "@@ -104,6 +109,10 @@ public class ColumnReference extends ValueNode",
"removed": []
},
{
"added": [
" if ( oldCR._mergeTableID != MERGE_UNKNOWN )",
" {",
" setMergeTableID( oldCR.getMergeTableID() );",
" }"
],
"header": "@@ -354,6 +363,10 @@ public class ColumnReference extends ValueNode",
"removed": []
},
{
"added": [
" matchingRC = fromList.bindColumnReference(this);"
],
"header": "@@ -391,7 +404,7 @@ public class ColumnReference extends ValueNode",
"removed": [
"\t\tmatchingRC = fromList.bindColumnReference(this);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [],
"header": "@@ -187,57 +187,6 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" /**",
" * <p>",
" * Replace references to the correlation name with the underlying table name",
" * in all ColumnReferences under all expressions. This replacement is",
" * done before the ColumnReferences are bound.",
" * </p>",
" */",
" public void replaceCorrelationName",
" (",
" MergeNode parent,",
" String correlationName,",
" TableName newTableName",
" )",
" throws StandardException",
" {",
" parent.replaceCorrelationName( correlationName, newTableName, _matchingRefinement );",
" replaceCorrelationNameInSetClauses( parent, correlationName, newTableName );",
" parent.replaceCorrelationName( correlationName, newTableName, _insertColumns );",
" parent.replaceCorrelationName( correlationName, newTableName, _insertValues );",
" }",
" ",
" /**",
" * <p>",
" * Replace references to the correlation name with the underlying table name",
" * in the SET clauses of WHEN MATCHED ... THEN UPDATE clauses. This replacement is",
" * done before the ColumnReferences are bound.",
" * </p>",
" */",
" public void replaceCorrelationNameInSetClauses",
" (",
" MergeNode parent,",
" String correlationName,",
" TableName newTableName",
" )",
" throws StandardException",
" {",
" if ( _updateColumns == null ) { return; }",
" ",
" // this handles the right side of the SET clauses",
" parent.replaceCorrelationName( correlationName, newTableName, _updateColumns );",
"",
" // we have to hand-process the left side because the Visitor",
" // logic for ResultColumns does not process the ColumnReference",
" for ( int i = 0; i < _updateColumns.size(); i++ )",
" {",
" ResultColumn rc = _updateColumns.elementAt( i );",
"",
" parent.replaceCorrelationName( correlationName, newTableName, rc.getReference() );",
" }",
" }",
" "
]
},
{
"added": [
" mergeNode.getColumnsInExpression( drivingColumnMap, _matchingRefinement, ColumnReference.MERGE_UNKNOWN );"
],
"header": "@@ -301,7 +250,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" mergeNode.getColumnsInExpression( drivingColumnMap, _matchingRefinement );"
]
},
{
"added": [
" mergeNode.getColumnsInExpression( drivingColumnMap, rc.getExpression(), ColumnReference.MERGE_UNKNOWN );",
" mergeNode.addColumn( drivingColumnMap, leftCR, ColumnReference.MERGE_TARGET );"
],
"header": "@@ -315,10 +264,10 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" mergeNode.getColumnsInExpression( drivingColumnMap, rc.getExpression() );",
" mergeNode.addColumn( drivingColumnMap, leftCR );"
]
},
{
"added": [
" mergeNode.getColumnsInExpression( drivingColumnMap, rc.getExpression(), ColumnReference.MERGE_UNKNOWN );"
],
"header": "@@ -326,7 +275,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" mergeNode.getColumnsInExpression( drivingColumnMap, rc.getExpression() );"
]
},
{
"added": [
" FromBaseTable targetTable",
" ResultColumnList setClauses = realiasSetClauses( targetTable );",
" bindSetClauses( fullFromList, targetTable, setClauses );",
"",
" // _updateColumns,",
" setClauses,"
],
"header": "@@ -338,15 +287,17 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" FromTable targetTable",
" bindSetClauses( fullFromList, targetTable );",
" ",
" _updateColumns,"
]
},
{
"added": [
" _dml = new UpdateNode( targetTable.getTableNameField(), selectNode, this, getContextManager() );"
],
"header": "@@ -355,7 +306,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" _dml = new UpdateNode( targetTable.getTableName(), selectNode, this, getContextManager() );"
]
},
{
"added": [
" /**",
" * <p>",
" * Due to discrepancies on how names are resolved in SELECT and UPDATE,",
" * we have to force the left side of SET clauses to use the same table identifiers",
" * as the right sides of the SET clauses.",
" * </p>",
" */",
" private ResultColumnList realiasSetClauses",
" (",
" FromBaseTable targetTable",
" )",
" throws StandardException",
" {",
" ResultColumnList rcl = new ResultColumnList( getContextManager() );",
" for ( int i = 0; i < _updateColumns.size(); i++ )",
" {",
" ResultColumn setRC = _updateColumns.elementAt( i );",
" ColumnReference newTargetColumn = new ColumnReference",
" (",
" setRC.getReference().getColumnName(),",
" targetTable.getTableName(),",
" getContextManager()",
" );",
" newTargetColumn.setMergeTableID( ColumnReference.MERGE_TARGET );",
" ResultColumn newRC = new ResultColumn",
" (",
" newTargetColumn,",
" setRC.getExpression(),",
" getContextManager()",
" );",
" rcl.addResultColumn( newRC );",
" }",
"",
" return rcl;",
" }",
" "
],
"header": "@@ -383,6 +334,42 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" FromTable targetTable,",
" ResultColumnList setClauses",
" setClauses.replaceOrForbidDefaults( targetTable.getTableDescriptor(), _updateColumns, true );",
" bindExpressions( setClauses, fullFromList );"
],
"header": "@@ -417,14 +404,15 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" FromTable targetTable",
" _updateColumns.replaceOrForbidDefaults( targetTable.getTableDescriptor(), _updateColumns, true );",
" bindExpressions( _updateColumns, fullFromList );"
]
},
{
"added": [
" FromBaseTable deleteTarget = new FromBaseTable",
" ( targetTable.getTableNameField(), null, null, null, getContextManager() );",
" FromList dummyFromList = new FromList( getContextManager() );",
" dummyFromList.addFromTable( deleteTarget );",
" dummyFromList.bindTables( dd, new FromList( getOptimizerFactory().doJoinOrderOptimization(), getContextManager() ) );",
" ",
" ( CURRENT_OF_NODE_NAME, deleteTarget, getContextManager() );"
],
"header": "@@ -685,8 +673,14 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" ( CURRENT_OF_NODE_NAME, targetTable, getContextManager() );"
]
},
{
"added": [
" _dml = new DeleteNode( targetTable.getTableNameField(), selectNode, this, getContextManager() );"
],
"header": "@@ -700,7 +694,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" _dml = new DeleteNode( targetTable.getTableName(), selectNode, this, getContextManager() );"
]
},
{
"added": [
" FromBaseTable targetTable"
],
"header": "@@ -771,7 +765,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" FromTable targetTable"
]
},
{
"added": [
" targetTable.getTableNameField(),"
],
"header": "@@ -795,7 +789,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" targetTable.getTableName(),"
]
},
{
"added": [
" String bufferedCRName = bufferedCR.getColumnName();",
" int bufferedCRMergeTableID = getMergeTableID( bufferedCR );"
],
"header": "@@ -1089,8 +1083,8 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" String tableName = bufferedCR.getTableName();",
" String columnName = bufferedCR.getColumnName();"
]
},
{
"added": [
" ( getMergeTableID( selectCR ) == bufferedCRMergeTableID) &&",
" bufferedCRName.equals( selectCR.getColumnName() )",
" ",
" if (SanityManager.DEBUG)",
" {",
" SanityManager.THROWASSERT( \"Can't find select list column corresponding to \" + bufferedCR.debugName() );",
" }"
],
"header": "@@ -1103,14 +1097,19 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" tableName.equals( selectCR.getTableName() ) &&",
" columnName.equals( selectCR.getColumnName() )"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
"import org.apache.derby.shared.common.sanity.SanityManager;"
],
"header": "@@ -40,6 +40,7 @@ import org.apache.derby.iapi.sql.dictionary.DataDictionary;",
"removed": []
},
{
"added": [],
"header": "@@ -213,24 +214,10 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" //",
" // Replace all references to correlation names with the actual",
" // resolved table name.",
" //",
" ",
" replaceCorrelationName( _targetTable.correlationName, _targetTable.tableName );",
" _targetTable.correlationName = null;",
"",
" if ( _sourceTable instanceof FromBaseTable )",
" {",
" TableName sourceTableName = ((FromBaseTable) _sourceTable).tableName;",
" replaceCorrelationName( _sourceTable.correlationName, sourceTableName );",
" _sourceTable.correlationName = null;",
" }"
]
},
{
"added": [
" _targetTable.getTableNameField(),"
],
"header": "@@ -238,7 +225,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" _targetTable.tableName,"
]
},
{
"added": [
""
],
"header": "@@ -249,7 +236,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" "
]
},
{
"added": [],
"header": "@@ -268,104 +255,6 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" /**",
" * <p>",
" * Replace references to the correlation name with the underlying table name",
" * in all ColumnReferences under all expressions. If the correlation name is null,",
" * then replace all references to the unqualified table name with the fully",
" * qualified table name. This replacement is",
" * done before the ColumnReferences are bound.",
" * </p>",
" */",
" private void replaceCorrelationName",
" (",
" String correlationName,",
" TableName newTableName",
" )",
" throws StandardException",
" {",
" if ( correlationName == null ) { correlationName = newTableName.getTableName(); }",
"",
" replaceCorrelationName",
" (",
" correlationName,",
" newTableName,",
" _searchCondition",
" );",
" ",
" for ( MatchingClauseNode mcn : _matchingClauses )",
" {",
" mcn.replaceCorrelationName",
" (",
" this,",
" correlationName,",
" newTableName",
" );",
" }",
" }",
" ",
" /**",
" * <p>",
" * Replace references to the correlation name with the underlying table name",
" * in all ColumnReferences under the indicated list of ResultColumns. This replacement is",
" * done before the ColumnReferences are bound.",
" * </p>",
" */",
" public void replaceCorrelationName",
" (",
" String correlationName,",
" TableName newTableName,",
" ResultColumnList rcl",
" )",
" throws StandardException",
" {",
" if ( rcl == null ) { return; }",
" ",
" for ( int i = 0; i < rcl.size(); i++ )",
" {",
" replaceCorrelationName( correlationName, newTableName, rcl.elementAt( i ) );",
" }",
" }",
" ",
" /**",
" * <p>",
" * Replace references to the correlation name with the underlying table name",
" * in all ColumnReferences in the indicated expression. This replacement is",
" * done before the ColumnReferences are bound.",
" * </p>",
" */",
" public void replaceCorrelationName",
" (",
" String correlationName,",
" TableName newTableName,",
" ValueNode expression",
" )",
" throws StandardException",
" {",
" if ( expression == null ) { return; }",
" ",
" CollectNodesVisitor<ColumnReference> getCRs =",
" new CollectNodesVisitor<ColumnReference>(ColumnReference.class);",
"",
" expression.accept(getCRs);",
" List<ColumnReference> colRefs = getCRs.getList();",
"",
" for ( ColumnReference cr : colRefs )",
" {",
" TableName origTableName = cr.getQualifiedTableName();",
" if ( origTableName != null )",
" {",
" if (",
" (origTableName.getSchemaName() == null) &&",
" correlationName.equals( origTableName.getTableName() )",
" )",
" {",
" cr.setQualifiedTableName( newTableName );",
" }",
" }",
" }",
" }",
""
]
},
{
"added": [
" getColumnsInExpression( drivingColumnMap, _searchCondition, ColumnReference.MERGE_UNKNOWN );",
"",
" int mergeTableID = mcn.isDeleteClause() ? ColumnReference.MERGE_TARGET : ColumnReference.MERGE_UNKNOWN;",
" getColumnsFromList( drivingColumnMap, mcn.getBufferedColumns(), mergeTableID );",
" addColumns",
" (",
" (FromTable) _leftJoinFromList.elementAt( SOURCE_TABLE_INDEX ),",
" drivingColumnMap,",
" selectList,",
" ColumnReference.MERGE_SOURCE",
" );",
" addColumns",
" (",
" (FromTable) _leftJoinFromList.elementAt( TARGET_TABLE_INDEX ),",
" drivingColumnMap,",
" selectList,",
" ColumnReference.MERGE_TARGET",
" );"
],
"header": "@@ -477,20 +366,34 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" getColumnsInExpression( drivingColumnMap, _searchCondition );",
" getColumnsFromList( drivingColumnMap, mcn.getBufferedColumns() );",
" addColumns( (FromTable) _leftJoinFromList.elementAt( SOURCE_TABLE_INDEX ), drivingColumnMap, selectList );",
" addColumns( (FromTable) _leftJoinFromList.elementAt( TARGET_TABLE_INDEX ), drivingColumnMap, selectList );"
]
},
{
"added": [
" cr.setMergeTableID( ColumnReference.MERGE_TARGET );"
],
"header": "@@ -507,6 +410,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" ResultColumnList selectList,",
" int mergeTableID"
],
"header": "@@ -598,7 +502,8 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" ResultColumnList selectList"
]
},
{
"added": [
" cr.setMergeTableID( mergeTableID );"
],
"header": "@@ -608,6 +513,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" ( HashMap<String,ColumnReference> map, ValueNode expression, int mergeTableID )"
],
"header": "@@ -632,7 +538,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" ( HashMap<String,ColumnReference> map, ValueNode expression )"
]
},
{
"added": [
" getColumnsFromList( map, colRefs, mergeTableID );",
" ( HashMap<String,ColumnReference> map, ResultColumnList rcl, int mergeTableID )"
],
"header": "@@ -643,12 +549,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" getColumnsFromList( map, colRefs );",
" ( HashMap<String,ColumnReference> map, ResultColumnList rcl )"
]
},
{
"added": [
" getColumnsFromList( map, colRefs, mergeTableID );",
" ( HashMap<String,ColumnReference> map, List<ColumnReference> colRefs, int mergeTableID )",
" addColumn( map, cr, mergeTableID );"
],
"header": "@@ -657,17 +563,17 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" getColumnsFromList( map, colRefs );",
" ( HashMap<String,ColumnReference> map, List<ColumnReference> colRefs )",
" addColumn( map, cr );"
]
},
{
"added": [
" ColumnReference originalCR,",
" int mergeTableID",
" ColumnReference cr = originalCR;",
" "
],
"header": "@@ -675,10 +581,13 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" ColumnReference cr"
]
},
{
"added": [
" associateColumn( cr, mergeTableID );",
" originalCR.setMergeTableID( cr.getMergeTableID() );",
""
],
"header": "@@ -686,6 +595,9 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
}
]
}
] |
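The essence of this revamp: a buffered column is matched to a SELECT-list column of the driving left join by (mergeTableID, columnName) instead of (tableName, columnName), which becomes ambiguous once correlation names are involved. A minimal, self-contained illustration of the new matching rule follows (toy types, not Derby's classes).

```java
import java.util.List;

// Toy model of matching a buffered column against the left join's
// SELECT list by merge-table id plus column name.
class SelectListOffsetSketch {
    static final int MERGE_UNKNOWN = 0;
    static final int MERGE_SOURCE = 1;
    static final int MERGE_TARGET = 2;

    record Column(String name, int mergeTableID) {}

    /** Return the 1-based offset of the matching column, or -1. */
    static int getSelectListOffset(List<Column> selectList, Column wanted) {
        for (int i = 0; i < selectList.size(); i++) {
            Column c = selectList.get(i);
            if (c.mergeTableID() == wanted.mergeTableID()
                    && c.name().equals(wanted.name())) {
                return i + 1;   // offsets are 1-based, as in the diff
            }
        }
        return -1;
    }
}
```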
derby-DERBY-3155-30e18dba
|
DERBY-3155: Remove some obsolete code which was causing a cross-schema MERGE statement to raise a missing schema error; commit derby-3155-29-aa-missingSchema.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1570352 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-3511a12d
|
DERBY-3155: Improve handling of correlation names for the target tables in MERGE statements; commit derby-3155-09-aa-correlationNames.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1547585 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
" // source table must be a vti or base table",
" if (",
" !(_sourceTable instanceof FromVTI) &&",
" !(_sourceTable instanceof FromBaseTable)",
" )",
" {",
" throw StandardException.newException( SQLState.LANG_SOURCE_NOT_BASE_VIEW_OR_VTI );",
" }",
"",
" // source and target may not have the same correlation names",
" if ( getExposedName( _targetTable ).equals( getExposedName( _sourceTable ) ) )",
" {",
" throw StandardException.newException( SQLState.LANG_SAME_EXPOSED_NAME );",
" }",
"",
" //",
" // Replace all references to a target correlation name with the actual",
" // resolved table name.",
" //",
" FromList dfl = new FromList( getContextManager() );",
" dfl.addFromTable( _sourceTable );",
" dfl.addFromTable( _targetTable );",
" dfl.bindTables( dd, new FromList( getOptimizerFactory().doJoinOrderOptimization(), getContextManager() ) );",
" if ( _targetTable.correlationName != null )",
" {",
" TableName targetTableName = _targetTable.tableName;",
" String correlationName = _targetTable.correlationName;",
" ",
" replaceCorrelationName",
" (",
" correlationName,",
" targetTableName,",
" _searchCondition",
" );",
" ",
" for ( MatchingClauseNode mcn : _matchingClauses )",
" {",
" mcn.replaceCorrelationName",
" (",
" this,",
" correlationName,",
" targetTableName",
" );",
" }",
"",
" _targetTable.correlationName = null;",
" }",
""
],
"header": "@@ -198,6 +198,54 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" // target table must be a base table",
" if ( !targetIsBaseTable( _targetTable ) ) { notBaseTable(); }"
],
"header": "@@ -209,17 +257,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" // source and target may not have the same correlation names",
" if ( getExposedName( dummyTargetTable ).equals( getExposedName( dummySourceTable ) ) )",
" {",
" throw StandardException.newException( SQLState.LANG_SAME_EXPOSED_NAME );",
" }",
"",
" if ( !targetIsBaseTable( dummyTargetTable ) ) { notBaseTable(); }"
]
}
]
}
] |
derby-DERBY-3155-3cc631bb
|
DERBY-3155: Forbid derived column lists in MERGE statements; commit derby-3155-23-aa-forbidDerivedColumnLists.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1566649 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
" // don't allow derived column lists right now",
" forbidDerivedColumnLists();",
" "
],
"header": "@@ -214,6 +214,9 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
}
]
}
] |
derby-DERBY-3155-3e77b5ad
|
DERBY-3155: More improvements to column resolution in MERGE statements; tests passed cleanly for me on derby-3155-30-ab-moreCorrelationNames.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1571808 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/ColumnReference.java",
"hunks": [
{
"added": [
" /**"
],
"header": "@@ -113,7 +113,7 @@ public class ColumnReference extends ValueNode",
"removed": [
"\t/**"
]
},
{
"added": [
"\t\tsetQualifiedTableName( oldCR.getQualifiedTableName() );"
],
"header": "@@ -352,7 +352,7 @@ public class ColumnReference extends ValueNode",
"removed": [
"\t\t_qualifiedTableName = oldCR.getQualifiedTableName();"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromBaseTable.java",
"hunks": [
{
"added": [],
"header": "@@ -3954,7 +3954,6 @@ class FromBaseTable extends FromTable",
"removed": [
"\t\tValueNode\t\t \t\t\tvalueNode;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromList.java",
"hunks": [
{
"added": [
"\t\t\tFromTable newNode = (FromTable) fromTable.bindNonVTITables(dataDictionary, fromListParam);",
" {",
" }",
" newNode.setMergeTableID( fromTable.getMergeTableID() );",
"\t\t\tFromTable newNode = (FromTable) fromTable.bindVTITables(fromListParam);",
" {",
" }",
" newNode.setMergeTableID( fromTable.getMergeTableID() );"
],
"header": "@@ -340,21 +340,27 @@ class FromList extends QueryTreeNodeVector<ResultSetNode>",
"removed": [
"\t\t\tResultSetNode newNode = fromTable.bindNonVTITables(dataDictionary, fromListParam);",
"\t\t\tResultSetNode newNode = fromTable.bindVTITables(fromListParam);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/FromTable.java",
"hunks": [
{
"added": [
"",
" /** for resolving column references in MERGE statements in tough cases*/",
" private int _mergeTableID = ColumnReference.MERGE_UNKNOWN;"
],
"header": "@@ -108,6 +108,9 @@ abstract class FromTable extends ResultSetNode implements Optimizable",
"removed": []
},
{
"added": [
"",
" ColumnReference newCR = new ColumnReference(rc.getName(), exposedName, cm);",
" if ( (oldCR != null ) && (oldCR.getMergeTableID() != ColumnReference.MERGE_UNKNOWN ) )",
" {",
" newCR.setMergeTableID( oldCR.getMergeTableID() );",
" }",
" newCR,"
],
"header": "@@ -1168,10 +1171,16 @@ abstract class FromTable extends ResultSetNode implements Optimizable",
"removed": [
" new ColumnReference(rc.getName(), exposedName, cm),"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
" if ( isUpdateClause() ) { bindUpdate( dd, mergeNode, fullFromList, targetTable ); }"
],
"header": "@@ -209,7 +209,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" if ( isUpdateClause() ) { bindUpdate( dd, fullFromList, targetTable ); }"
]
},
{
"added": [
" else if ( isDeleteClause() )",
" {",
" // add all of the THEN columns",
" mergeNode.getColumnsFromList( drivingColumnMap, _thenColumns, ColumnReference.MERGE_TARGET );",
" }"
],
"header": "@@ -272,6 +272,11 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" MergeNode mergeNode,",
" bindSetClauses( mergeNode, fullFromList, targetTable, setClauses );",
""
],
"header": "@@ -280,17 +285,18 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" bindSetClauses( fullFromList, targetTable, setClauses );",
" "
]
},
{
"added": [
" MergeNode mergeNode,"
],
"header": "@@ -401,6 +407,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
"",
" //",
" // For column resolution later on, columns on the left side",
" // of SET operators are associated with the TARGET table.",
" //",
" for ( int i = 0; i < _updateColumns.size(); i++ )",
" {",
" ResultColumn rc = _updateColumns.elementAt( i );",
" ColumnReference cr = rc.getReference();",
" cr.setMergeTableID( ColumnReference.MERGE_TARGET );",
" }",
"",
" // Now associate the columns on the right side of SET operators.",
" CollectNodesVisitor<ColumnReference> getCRs =",
" new CollectNodesVisitor<ColumnReference>(ColumnReference.class);",
" _updateColumns.accept(getCRs);",
" List<ColumnReference> colRefs = getCRs.getList();",
" for ( ColumnReference cr : colRefs )",
" {",
" mergeNode.associateColumn( fullFromList, cr, ColumnReference.MERGE_UNKNOWN );",
" }"
],
"header": "@@ -411,6 +418,27 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" private void bindDeleteThenColumns( ResultColumnList selectList )"
],
"header": "@@ -738,7 +766,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" void bindDeleteThenColumns( ResultColumnList selectList )"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
"",
" dummyTargetTable.setMergeTableID( ColumnReference.MERGE_TARGET );",
" dummySourceTable.setMergeTableID ( ColumnReference.MERGE_SOURCE );",
" "
],
"header": "@@ -254,9 +254,13 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" }",
""
],
"header": "@@ -367,8 +371,8 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" } ",
" "
]
},
{
"added": [
" String[] columnNames = getColumns( mergeTableID, drivingColumnMap );"
],
"header": "@@ -558,7 +562,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" String[] columnNames = getColumns( getExposedName( fromTable ), drivingColumnMap );"
]
},
{
"added": [
" private String[] getColumns( int mergeTableID, HashMap<String,ColumnReference> map )",
" if ( cr.getMergeTableID() == mergeTableID ) { list.add( cr.getColumnName() ); }"
],
"header": "@@ -572,13 +576,13 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" private String[] getColumns( String exposedName, HashMap<String,ColumnReference> map )",
" if ( exposedName.equals( cr.getTableName() ) ) { list.add( cr.getColumnName() ); }"
]
},
{
"added": [
" void getColumnsFromList"
],
"header": "@@ -605,7 +609,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" private void getColumnsFromList"
]
},
{
"added": [
" if ( cr.getTableName() == null )",
" {",
" ResultColumn rc = _leftJoinFromList.bindColumnReference( cr );",
" TableName tableName = new TableName( null, rc.getTableName(), getContextManager() );",
" cr = new ColumnReference( cr.getColumnName(), tableName, getContextManager() );",
" }",
"",
" associateColumn( _leftJoinFromList, cr, mergeTableID );",
"",
" ColumnReference mapCR = map.get( key );",
" if ( mapCR != null )",
" {",
" mapCR.setMergeTableID( cr.getMergeTableID() );",
" }",
" else",
" void associateColumn( FromList fromList, ColumnReference cr, int mergeTableID )"
],
"header": "@@ -638,17 +642,30 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" associateColumn( cr, mergeTableID );",
" if ( map.get( key ) == null )",
" private void associateColumn( ColumnReference cr, int mergeTableID )"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/UpdateNode.java",
"hunks": [
{
"added": [
" //",
" // First step in associating added columns with the TARGET table of",
" // a MERGE statement. Here we identify the columns which were NOT ADDED.",
" //",
" if ( inMatchingClause() ) { tagOriginalResultSetColumns(); }",
""
],
"header": "@@ -195,6 +195,12 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
""
],
"header": "@@ -463,6 +469,7 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" /* Append to the ResultColumnList */"
],
"header": "@@ -523,7 +530,7 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": [
"\t\t\t/* Append to the ResultColumnList */"
]
},
{
"added": [
" //",
" // Second step in associating added columns with the TARGET table of",
" // a MERGE statement. Here we associate the columns which were not originally tagged.",
" //",
" if ( inMatchingClause() ) { associateAddedColumns(); }",
""
],
"header": "@@ -536,6 +543,12 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" /**",
" * Associate all added columns with the TARGET table of the enclosing",
" * MERGE statement.",
" */",
" private void associateAddedColumns()",
" throws StandardException",
" {",
" for ( ColumnReference cr : collectAllResultSetColumns() )",
" {",
" if ( !cr.taggedWith( TagFilter.ORIG_UPDATE_COL ) )",
" {",
" cr.setMergeTableID( ColumnReference.MERGE_TARGET );",
" }",
" }",
" }",
"",
" /**",
" * Tag the original columns mentioned in the result list.",
" */",
" private void tagOriginalResultSetColumns()",
" throws StandardException",
" {",
" for ( ColumnReference cr : collectAllResultSetColumns() )",
" {",
" cr.addTag( TagFilter.ORIG_UPDATE_COL );",
" }",
" }",
"",
" /**",
" * Collect all of the result set columns.",
" */",
" private List<ColumnReference> collectAllResultSetColumns()",
" throws StandardException",
" {",
" CollectNodesVisitor<ColumnReference> crVisitor =",
" new CollectNodesVisitor<ColumnReference>(ColumnReference.class);",
" resultSet.getResultColumns().accept( crVisitor );",
"",
" return crVisitor.getList();",
" }",
""
],
"header": "@@ -654,6 +667,47 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
}
]
}
] |
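The UpdateNode changes in this patch use a two-phase trick: first tag every column reference present in the result list before the compiler enhances it, then, after enhancement, associate every untagged (i.e. added) column with the MERGE target table. A self-contained sketch of that bookkeeping (toy types, not Derby's TagFilter machinery):

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Toy model of the tag-then-associate pass: columns not seen in phase
// one must have been added by the compiler, so they belong to TARGET.
class AddedColumnTaggingSketch {
    static final int MERGE_TARGET = 2;

    static class ColumnRef {
        final String name;
        int mergeTableID;
        ColumnRef(String name) { this.name = name; }
    }

    private final Set<ColumnRef> originals = new HashSet<>();

    /** Phase one: remember the columns present before enhancement. */
    void tagOriginalResultSetColumns(List<ColumnRef> resultSetColumns) {
        originals.addAll(resultSetColumns);
    }

    /** Phase two: any column not tagged in phase one was added. */
    void associateAddedColumns(List<ColumnRef> resultSetColumns) {
        for (ColumnRef cr : resultSetColumns) {
            if (!originals.contains(cr)) {
                cr.mergeTableID = MERGE_TARGET;
            }
        }
    }
}
```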
derby-DERBY-3155-480456f6
|
DERBY-3155: Allow for row location columns in the result column list of base tables; tests passed cleanly for me on derby-3155-16-aa-treatCurrentRowLocationNodeLikeBaseColumnNode.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1560134 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-4cf6633b
|
DERBY-3155: Improve comments for compilation of MERGE statements; commit derby-3155-51-aa-cleanup2.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1579950 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
" //",
" // Filled in by the constructor.",
" //",
" // Filled in at bind() time.",
" // the INSERT/UPDATE/DELETE statement of this WHEN [ NOT ] MATCHED clause",
" // the columns in the temporary conglomerate which drives the INSERT/UPDATE/DELETE",
" //",
" // Filled in at generate() time.",
" //"
],
"header": "@@ -72,23 +72,27 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" // filled in by the constructor",
" // filled in at bind() time",
" /** the INSERT/UPDATE/DELETE statement of this WHEN [ NOT ] MATCHED clause */",
" /** the columns in the temporary conglomerate which drives the INSERT/UPDATE/DELETE */",
" // Filled in at generate() time"
]
},
{
"added": [
" * Due to discrepancies in how names are resolved by SELECT and UPDATE,"
],
"header": "@@ -347,7 +351,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" * Due to discrepancies on how names are resolved in SELECT and UPDATE,"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
" * The driving left join's selectList then looks like this...",
" * </p>",
" *",
" * <pre>",
" * sc1, ..., scN, tc1, ..., tcM, targetTable.RowLocation",
" * </pre>",
" *",
" * <p>",
" * Where sc1...scN are the columns we need from the source table (in alphabetical",
" * order) and tc1...tcM are the columns we need from the target table (in alphabetical",
" * order).",
" * </p>",
" *",
" * <p>"
],
"header": "@@ -108,6 +108,20 @@ import org.apache.derby.shared.common.sanity.SanityManager;",
"removed": []
},
{
"added": [
" *",
" * <p>",
" * Name resolution was a particularly thorny problem. This is because name resolution",
" * behaves differently for SELECTs and UPDATEs. In particular, while processing UPDATEs,",
" * the compiler throws away name resolution information; this happens as a consequence",
" * of work done on DERBY-1043. In the end, I had to invent more name resolution machinery",
" * in order to compensate for the differences in the handling of SELECTs and UPDATEs.",
" * If we are to allow subqueries in matching refinement clauses and in the values expressions",
" * of INSERT and UPDATE actions, then we probably need to remove this special name",
" * resolution machinery. And that, in turn, probably means revisiting DERBY-1043.",
" * </p>",
" *",
" * <p>",
" * The special name resolution machinery involves marking source and target column references",
" * in order to make it clear which table they belong to. This is done in associateColumn(). The markers",
" * are consulted at code-generation time in order to resolve column references when we",
" * generate the expressions needed to populate the rows which go into the temporary tables.",
" * That resolution happens in MatchingClauseNode.getSelectListOffset().",
" * </p>"
],
"header": "@@ -124,6 +138,25 @@ import org.apache.derby.shared.common.sanity.SanityManager;",
"removed": []
},
{
"added": [
" //",
" // Filled in by the constructor.",
" //",
" //",
" // Filled in at bind() time.",
" //",
" //",
" // Filled in at generate() time.",
" //"
],
"header": "@@ -145,18 +178,24 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" // constructor args",
" // filled in at bind() time",
" // filled in at generate() time"
]
},
{
"added": [
" /**",
" * <p>",
" * Associate a column with the SOURCE or TARGET table. This is",
" * part of the special name resolution machinery which smooths over",
" * the differences between name resolution for SELECTs and UPDATEs.",
" * </p>",
" */"
],
"header": "@@ -200,7 +239,13 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" /** Associate a column with the SOURCE or TARGET table */"
]
},
{
"added": [
" /**",
" * <p>",
" * Add the columns in the matchingRefinement clause to the evolving map.",
" * This is called when we're building the SELECT list for the driving left join.",
" * </p>",
" */"
],
"header": "@@ -253,7 +298,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" /** Add the columns in the matchingRefinement clause to the evolving map */"
]
},
{
"added": [
" /**",
" * <p>",
" * Add a list of columns to the the evolving map.",
" * This is called when we're building the SELECT list for the driving left join.",
" * </p>",
" */"
],
"header": "@@ -265,7 +315,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" /** Add a list of columns to the the evolving map */"
]
},
{
"added": [
" // FOR THE DRIVING LEFT JOIN."
],
"header": "@@ -728,7 +783,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" // FOR THE DRIVING LEFT JOIN"
]
}
]
}
] |
derby-DERBY-3155-5d1ab631
|
DERBY-3155: Add support for ? parameters as INSERT values in MERGE statements; commit derby-3155-25-aa-parametersAsInsertValues.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1567368 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
" ResultColumnList selectList = new ResultColumnList( getContextManager() );",
" for ( int i = 0; i < _insertValues.size(); i++ )",
" {",
" ResultColumn rc = _insertValues.elementAt( i );",
" selectList.addResultColumn( rc.cloneMe() );",
" }",
" selectList.replaceOrForbidDefaults( targetTable.getTableDescriptor(), _insertColumns, true );",
" bindExpressions( selectList, fullFromList );",
" bindInsertValues( fullFromList, targetTable );",
""
],
"header": "@@ -773,8 +773,17 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" bindInsertValues( fullFromList, targetTable );"
]
}
]
}
] |
derby-DERBY-3155-661c2e6c
|
DERBY-3155: Add grammar and bind logic for MERGE statement; commit derby-3155-01-ac-grammar.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1516157 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/InsertNode.java",
"hunks": [
{
"added": [
" //",
" // For the MERGE statement, DEFAULT expressions in the SELECT node",
" // may have been replaced with generated expressions already.",
" //",
" ResultColumnList tempRCL = resultSet.getResultColumns();",
" boolean defaultsWereReplaced = false;",
" for ( int i = 0; i < tempRCL.size(); i++ )",
" {",
" ResultColumn rc = tempRCL.getResultColumn( i+1 );",
" if ( rc.wasDefaultColumn() ) { defaultsWereReplaced = true; }",
" }",
""
],
"header": "@@ -306,6 +306,18 @@ public final class InsertNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
"\t\tresultSet = enhanceAndCheckForAutoincrement( resultSet, inOrder, colMap, defaultsWereReplaced );"
],
"header": "@@ -436,7 +448,7 @@ public final class InsertNode extends DMLModStatementNode",
"removed": [
"\t\tresultSet = enhanceAndCheckForAutoincrement(resultSet, inOrder, colMap);"
]
},
{
"added": [
"\t * @param defaultsWereReplaced true if DEFAULT clauses were replaced with generated expressions",
"\tResultSetNode enhanceAndCheckForAutoincrement",
" (",
" ResultSetNode resultSet,",
" boolean inOrder,",
" int[] colMap,",
" boolean defaultsWereReplaced",
" )"
],
"header": "@@ -550,12 +562,18 @@ public final class InsertNode extends DMLModStatementNode",
"removed": [
"\tResultSetNode enhanceAndCheckForAutoincrement(",
"\t\t\tResultSetNode resultSet, boolean inOrder, int[] colMap)"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/JoinNode.java",
"hunks": [
{
"added": [
" joinClause = bindExpression( joinClause, true, true, \"ON\" );"
],
"header": "@@ -801,36 +801,7 @@ class JoinNode extends TableOperatorNode",
"removed": [
"\t\t\t/* Create a new fromList with only left and right children before",
"\t\t\t * binding the join clause. Valid column references in the join clause",
"\t\t\t * are limited to columns from the 2 tables being joined. This",
"\t\t\t * algorithm enforces that.",
"\t\t\t */",
" FromList fromList = new FromList(",
" getOptimizerFactory().doJoinOrderOptimization(), cm);",
"",
"\t\t\tfromList.addElement((FromTable) leftResultSet);",
"\t\t\tfromList.addElement((FromTable) rightResultSet);",
"",
" int previousReliability = orReliability( CompilerContext.ON_CLAUSE_RESTRICTION );",
"\t\t\tjoinClause = joinClause.bindExpression(",
"\t\t\t\t\t\t\t\t\t fromList, subqueryList,",
" aggregates);",
" cc.setReliability( previousReliability );",
"",
"\t\t\t// SQL 2003, section 7.7 SR 5",
"\t\t\tSelectNode.checkNoWindowFunctions(joinClause, \"ON\");",
"",
"\t\t\t/*",
"\t\t\t** We cannot have aggregates in the ON clause.",
"\t\t\t** In the future, if we relax this, we'll need",
" ** to be able to pass the list of aggregates up",
"\t\t\t** the tree.",
"\t\t\t*/",
" if (!aggregates.isEmpty())",
"\t\t\t{",
"\t\t\t\tthrow StandardException.newException(SQLState.LANG_NO_AGGREGATES_IN_ON_CLAUSE);",
"\t\t\t}"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumnList.java",
"hunks": [
{
"added": [
" "
],
"header": "@@ -891,6 +891,7 @@ class ResultColumnList extends QueryTreeNodeVector<ResultColumn>",
"removed": []
},
{
"added": [
"\t{",
" forbidOverrides( sourceRSRCL, false );",
" }",
" ",
"\t/**",
"\t * check if any autoincrement or generated columns exist in the result column list.",
"\t * called from insert or update where you cannot insert/update the value",
"\t * of a generated or autoincrement column.",
"\t *",
"\t * @exception StandardException\t\tIf the column is an ai column",
"\t */",
" void forbidOverrides(ResultColumnList sourceRSRCL, boolean defaultsWereReplaced )",
"\t\tthrows StandardException"
],
"header": "@@ -4034,6 +4035,19 @@ class ResultColumnList extends QueryTreeNodeVector<ResultColumn>",
"removed": []
},
{
"added": [
" if ( !defaultsWereReplaced && (sourceRC != null) && !sourceRC.hasGenerationClause() && !sourceRC.wasDefaultColumn() )"
],
"header": "@@ -4046,7 +4060,7 @@ class ResultColumnList extends QueryTreeNodeVector<ResultColumn>",
"removed": [
" if ( (sourceRC != null) && !sourceRC.hasGenerationClause() && !sourceRC.wasDefaultColumn() )"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/UpdateNode.java",
"hunks": [
{
"added": [
" private boolean inMatchedClause;",
""
],
"header": "@@ -78,6 +78,8 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" * @param inMatchedClause True if this UPDATE is part of a MATCHED ... THEN UPDATE clause of a MERGE statement.",
" boolean inMatchedClause,",
" this.inMatchedClause = inMatchedClause;"
],
"header": "@@ -86,14 +88,17 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" /*",
" */"
],
"header": "@@ -321,10 +326,12 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" "
],
"header": "@@ -435,6 +442,7 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
}
]
}
] |
derby-DERBY-3155-69ab4bd8
|
DERBY-3155: Make the INSERT column list optional in the WHEN NOT MATCHED clause of a MERGE statement provided that the number of values in the VALUES subclause equals the number of columns in the target table; commit derby-3155-12-aa-canOmitInsertColumnList.diff.
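A hedged sketch of the newly legal form, assuming hypothetical two-column tables t(id, val) and s(id, val):
    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    class MergeNoColumnListSketch {
        static void merge(Connection conn) throws SQLException {
            Statement st = conn.createStatement();
            // No column list after INSERT: the two VALUES must line up,
            // in order, with the two columns of the target table t.
            st.executeUpdate("MERGE INTO t USING s ON t.id = s.id "
                + "WHEN NOT MATCHED THEN INSERT VALUES (s.id, s.val)");
            st.close();
        }
    }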
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1558871 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;"
],
"header": "@@ -41,6 +41,7 @@ import org.apache.derby.iapi.sql.ResultDescription;",
"removed": []
},
{
"added": [
" TableDescriptor td = targetTable.getTableDescriptor();",
"",
" // construct a full insert column list if insert columns weren't specified",
" if ( _insertColumns == null ) { _insertColumns = buildFullColumnList( td ); }",
""
],
"header": "@@ -780,13 +781,16 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" TableDescriptor td = targetTable.getTableDescriptor();",
""
]
}
]
}
] |
derby-DERBY-3155-6b8d71bb
|
DERBY-3155: Fix bug in MERGE statement fired by a trigger; commit derby-3155-53-aa-transitionSimpleColumn.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1587317 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-6ffb1125
|
DERBY-3155: Add a test case for privilege checking on a MERGE statement with all 3 kinds of actions; commit derby-3155-35-aa-allPrivsTest.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1574956 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-77b6e859
|
DERBY-3155: Eliminate redundant copies of columns in the select list of the driving left join for MERGE statements; commit derby-3155-43-aa-eliminateDuplicateColumnRefs.diff.
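The dedup itself is just the List-to-Set switch visible in the hunks below; as a standalone sketch (class, method, and parameter names hypothetical):
    import java.util.HashSet;
    import java.util.Set;

    class DedupSketch {
        // A column mentioned several times in the MERGE statement should appear
        // only once in the select list of the driving left join; a HashSet
        // collapses the repeats that the previous ArrayList kept.
        static String[] distinctColumnNames(String[] referencedColumns) {
            Set<String> set = new HashSet<String>();
            for (String name : referencedColumns) { set.add(name); }
            String[] retval = new String[set.size()];
            set.toArray(retval);
            return retval;
        }
    }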
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1577566 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
"import java.util.HashSet;"
],
"header": "@@ -24,6 +24,7 @@ package\torg.apache.derby.impl.sql.compile;",
"removed": []
},
{
"added": [
""
],
"header": "@@ -391,7 +392,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" "
]
},
{
"added": [
" HashSet<String> set = new HashSet<String>();",
" if ( cr.getMergeTableID() == mergeTableID ) { set.add( cr.getColumnName() ); }",
" String[] retval = new String[ set.size() ];",
" set.toArray( retval );"
],
"header": "@@ -687,15 +688,15 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" ArrayList<String> list = new ArrayList<String>();",
" if ( cr.getMergeTableID() == mergeTableID ) { list.add( cr.getColumnName() ); }",
" String[] retval = new String[ list.size() ];",
" list.toArray( retval );"
]
}
]
}
] |
derby-DERBY-3155-7f8154de
|
DERBY-3155: Make MERGE statement serialize SQLRefs rather than HeapRowLocations; commit derby-3155-17-aa-serializingRowLocations.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1560452 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MergeResultSet.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.types.SQLRef;"
],
"header": "@@ -33,6 +33,7 @@ import org.apache.derby.iapi.sql.execute.ExecRow;",
"removed": []
}
]
}
] |
derby-DERBY-3155-856f8920
|
DERBY-3155: Replace correlation names on the left side of the SET clauses in MERGE statements: commit derby-3155-14-aa-replaceCorrelationNamesOnLeftSideOfSETclauses.diff.
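A sketch of the statement shape this fixes, with hypothetical tables t(id, val) and s(id, val); the correlation name x on the left side of SET is what gets replaced:
    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    class MergeCorrelationSketch {
        static void merge(Connection conn) throws SQLException {
            Statement st = conn.createStatement();
            // x qualifies the column being SET; the compiler resolves it
            // back to the corresponding column of the target table t.
            st.executeUpdate("MERGE INTO t x USING s ON x.id = s.id "
                + "WHEN MATCHED THEN UPDATE SET x.val = s.val");
            st.close();
        }
    }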
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1559183 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-898f3f5a
|
DERBY-3155: Make cleanup logic in MergeResultSet more closely resemble the cleanup logic in InsertResultSet; commit derby-3155-15-aa-replumbMergeResultSetCleanup.diff.
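The reworked plumbing boils down to the open/reopen counter shown in the hunks below; a minimal standalone sketch of that idiom, where the RowSource interface is a hypothetical stand-in for the driving left join:
    class OpenReopenSketch {
        interface RowSource { // hypothetical stand-in for the driving left join
            void openCore() throws Exception;
            void reopenCore() throws Exception;
        }

        private final RowSource drivingLeftJoin;
        private int numOpens;

        OpenReopenSketch(RowSource drivingLeftJoin) { this.drivingLeftJoin = drivingLeftJoin; }

        // First open opens the underlying scan; subsequent opens of the same
        // activation reopen it, mirroring InsertResultSet's plumbing.
        void open() throws Exception {
            if (numOpens++ == 0) { drivingLeftJoin.openCore(); }
            else { drivingLeftJoin.reopenCore(); }
        }

        // cleanup resets the counter so the next open() starts fresh
        void close() { numOpens = 0; }
    }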
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1559218 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MergeResultSet.java",
"hunks": [
{
"added": [
"\tprivate int\t\t\t\t\t\tnumOpens;",
" "
],
"header": "@@ -60,6 +60,8 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": []
},
{
"added": [
"\t\tif (numOpens++ == 0)",
"\t\t{",
"\t\t\t_drivingLeftJoin.openCore();",
"\t\t}",
"\t\telse",
"\t\t{",
"\t\t\t_drivingLeftJoin.reopenCore();",
"\t\t}",
""
],
"header": "@@ -95,6 +97,15 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": []
},
{
"added": [],
"header": "@@ -124,7 +135,6 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": [
" _drivingLeftJoin.openCore();"
]
}
]
}
] |
derby-DERBY-3155-89f969c4
|
DERBY-3155: Verify that columns needed for triggers are buffered up by MERGE statements, even when the MERGE statements do not mention those columns; commit derby-3155-46-aa-deferredDeletes.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1578945 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-8a90f7cd
|
DERBY-3155: Add tests for the (de)serialization of MERGE statements; commit derby-3155-45-aa-serialization.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1578920 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-9cfeeb1e
|
DERBY-3155: Forbid MERGE statements in soft-upgraded databases; commit derby-3155-52-aa-upgrade.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1580889 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-9d3ad325
|
DERBY-3155: Add test for MERGE statement which reads the target table via an index; commit derby-3155-48-aa-indexScan.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1579685 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-a19ac61c
|
DERBY-3155: Add lock mode comment to master MERGE node; commit derby-3155-36-aa-lockModeComment.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1575026 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
" //",
" // No need to set lockMode in the master MergeNode. The individual",
" // actions and the driving left-join will set their own lock modes.",
" //"
],
"header": "@@ -878,12 +878,10 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
"\t\t/* In language we always set it to row lock, it's up to store to",
"\t\t * upgrade it to table lock. This makes sense for the default read",
"\t\t * committed isolation level and update lock. For more detail, see",
"\t\t * Beetle 4133.",
"\t\t */",
"\t\t//lockMode = TransactionController.MODE_RECORD;"
]
}
]
}
] |
derby-DERBY-3155-a2b37c2a
|
DERBY-3155: Materialize LOBs before buffering them up for processing by WHEN [ NOT ] MATCHED clauses; commit derby-3155-44-aa-lobsInTriggers.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1578535 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MergeResultSet.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.io.StreamStorable;"
],
"header": "@@ -24,6 +24,7 @@ package org.apache.derby.impl.sql.execute;",
"removed": []
}
]
}
] |
derby-DERBY-3155-a4a5f1ac
|
DERBY-3155: Fix build problem introduced by derby-3155-38-aa-datatypes.diff; commit derby-3155-39-aa-fixBuild.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1576062 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-a7d99c10
|
DERBY-3155: Add tests to verify that BEFORE triggers can't fire MERGE statements, just as they can't fire INSERT/UPDATE/DELETE statements; commit derby-3155-11-ab-beforeTriggersCantFireMerge.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1549948 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-b3f38f87
|
DERBY-3155: Forbid synonyms in MERGE statement; tests passed cleanly for me on derby-3155-21-ac-cleanupAndForbidSynonyms.diff.
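A hedged sketch of the behavior being enforced, using hypothetical objects t(id, val), s(id, val), and a synonym syn; the rejection corresponds to the new LANG_NO_SYNONYMS_IN_MERGE condition added by the patch:
    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    class MergeSynonymSketch {
        static void demo(Connection conn) throws SQLException {
            Statement st = conn.createStatement();
            st.executeUpdate("CREATE SYNONYM syn FOR t");
            try {
                st.executeUpdate("MERGE INTO syn USING s ON syn.id = s.id "
                    + "WHEN MATCHED THEN DELETE");
            } catch (SQLException expected) {
                // rejected: neither the source nor the target of a MERGE
                // may resolve to a synonym after this patch
            }
            st.close();
        }
    }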
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1565830 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/ColumnReference.java",
"hunks": [
{
"added": [
" if ( (_mergeTableID != MERGE_UNKNOWN) && (_mergeTableID != mergeTableID) )",
" \"MERGE statement can't re-associate column \" + getSQLColumnName() +",
" \" from \" + prettyPrintMergeTableID( _mergeTableID ) +",
" \" to \" + prettyPrintMergeTableID( mergeTableID )",
" private String prettyPrintMergeTableID( int mergeTableID )",
" {",
" switch ( mergeTableID )",
" {",
" case MERGE_SOURCE: return \"SOURCE\";",
" case MERGE_TARGET: return \"TARGET\";",
" default: return \"UNKNOWN\";",
" }",
" }"
],
"header": "@@ -1241,20 +1241,31 @@ public class ColumnReference extends ValueNode",
"removed": [
" if ( _mergeTableID != MERGE_UNKNOWN )",
" \"MERGE statement can't re-associate column \" + debugName()"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
" TableName tableName = targetTable.getTableNameField();",
" FromList selectFromList = fullFromList;",
" ",
" selectFromList,"
],
"header": "@@ -294,11 +294,13 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" // _updateColumns,",
" fullFromList,"
]
},
{
"added": [
" _dml = new UpdateNode( tableName, selectNode, this, getContextManager() );"
],
"header": "@@ -306,7 +308,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" _dml = new UpdateNode( targetTable.getTableNameField(), selectNode, this, getContextManager() );"
]
},
{
"added": [
" // split the row into before and after images"
],
"header": "@@ -320,6 +322,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" TableName tableName = targetTable.getTableName();",
" tableName,"
],
"header": "@@ -351,10 +354,11 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" targetTable.getTableName(),"
]
},
{
"added": [
" "
],
"header": "@@ -436,7 +440,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
""
]
},
{
"added": [
" SanityManager.THROWASSERT( \"Can't find select list column corresponding to \" + bufferedCR.getSQLColumnName() );"
],
"header": "@@ -1108,7 +1112,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" SanityManager.THROWASSERT( \"Can't find select list column corresponding to \" + bufferedCR.debugName() );"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
" // synonyms not allowed",
" forbidSynonyms( dd );",
"",
" FromTable dflSource = cloneFromTable( _sourceTable );",
" FromBaseTable dflTarget = (FromBaseTable) cloneFromTable( _targetTable );",
" dfl.addFromTable( dflSource );",
" dfl.addFromTable( dflTarget );",
" if ( !targetIsBaseTable( dflTarget ) ) { notBaseTable(); }",
" FromList dummyFromList = cloneFromList( dd, dflTarget );",
" FromBaseTable dummyTargetTable = (FromBaseTable) dummyFromList.elementAt( TARGET_TABLE_INDEX );"
],
"header": "@@ -214,34 +214,23 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" dfl.addFromTable( _sourceTable );",
" dfl.addFromTable( _targetTable );",
" //",
" // Bind the WHEN [ NOT ] MATCHED clauses.",
" //",
" FromList dummyFromList = new FromList( getContextManager() );",
" FromBaseTable dummyTargetTable = new FromBaseTable",
" (",
" _targetTable.getTableNameField(),",
" _targetTable.correlationName,",
" null,",
" null,",
" getContextManager()",
" );",
" FromTable dummySourceTable = cloneSourceTable();",
" ",
" dummyFromList.addFromTable( dummySourceTable );",
" dummyFromList.addFromTable( dummyTargetTable );",
" dummyFromList.bindTables( dd, new FromList( getOptimizerFactory().doJoinOrderOptimization(), getContextManager() ) );",
"",
" if ( !targetIsBaseTable( _targetTable ) ) { notBaseTable(); }"
]
},
{
"added": [
" /** Create a FromList for binding a WHEN [ NOT ] MATCHED clause */",
" private FromList cloneFromList( DataDictionary dd, FromBaseTable targetTable )",
" throws StandardException",
" {",
" FromList dummyFromList = new FromList( getContextManager() );",
" FromBaseTable dummyTargetTable = new FromBaseTable",
" (",
" targetTable.getTableNameField(),",
" targetTable.correlationName,",
" null,",
" null,",
" getContextManager()",
" );",
" FromTable dummySourceTable = cloneFromTable( _sourceTable );",
" ",
" dummyFromList.addFromTable( dummySourceTable );",
" dummyFromList.addFromTable( dummyTargetTable );",
" dummyFromList.bindTables( dd, new FromList( getOptimizerFactory().doJoinOrderOptimization(), getContextManager() ) );",
"",
" return dummyFromList;",
" }",
"",
" /** Neither the source nor the target table may be a synonym */",
" private void forbidSynonyms( DataDictionary dd ) throws StandardException",
" {",
" forbidSynonyms( dd, _targetTable.getTableNameField().cloneMe() );",
" if ( _sourceTable instanceof FromBaseTable )",
" {",
" forbidSynonyms( dd, ((FromBaseTable)_sourceTable).getTableNameField().cloneMe() );",
" }",
" }",
" private void forbidSynonyms( DataDictionary dd, TableName tableName ) throws StandardException",
" {",
" tableName.bind( dd );",
"",
" TableName synonym = resolveTableToSynonym( tableName );",
" if ( synonym != null )",
" {",
" throw StandardException.newException( SQLState.LANG_NO_SYNONYMS_IN_MERGE );",
" }",
" }",
""
],
"header": "@@ -255,12 +244,54 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
""
],
"header": "@@ -406,7 +437,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" "
]
},
{
"added": [
" /** Clone a FromTable to avoid binding the original */",
" private FromTable cloneFromTable( FromTable fromTable ) throws StandardException",
" if ( fromTable instanceof FromVTI )",
" FromVTI source = (FromVTI) fromTable;"
],
"header": "@@ -458,12 +489,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" /** Clone the source table for binding the MATCHED clauses */",
" private FromTable cloneSourceTable() throws StandardException",
" if ( _sourceTable instanceof FromVTI )",
" FromVTI source = (FromVTI) _sourceTable;"
]
},
{
"added": [
" else if ( fromTable instanceof FromBaseTable )",
" FromBaseTable source = (FromBaseTable) fromTable;"
],
"header": "@@ -475,9 +506,9 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" else if ( _sourceTable instanceof FromBaseTable )",
" FromBaseTable source = (FromBaseTable) _sourceTable;"
]
}
]
}
] |
derby-DERBY-3155-bba59bbb
|
DERBY-3155: Uncomment a test verifying that trigger transition tables cannot be used as the target tables of MERGE statements: derby-3155-05-aa-triggerTransitionTableAsTarget.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1535397 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-bbd6aff4
|
DERBY-3155: Point matching refinement clauses into the row returned by the driving left join of the MERGE statement; commit derby-3155-27-aa-adjustMatchingRefinements.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1569521 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [],
"header": "@@ -234,12 +234,6 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" /** Re-bind various clauses and lists once we have ResultSet numbers for the driving left join */",
" void bindResultSetNumbers( MergeNode mergeNode, FromList fullFromList ) throws StandardException",
" {",
" bindRefinement( mergeNode, fullFromList );",
" }",
""
]
},
{
"added": [
" adjustMatchingRefinement( selectList, generatedScan );",
" "
],
"header": "@@ -1283,6 +1277,8 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" private void generateInsertUpdateRow"
],
"header": "@@ -1369,7 +1365,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" void generateInsertUpdateRow"
]
},
{
"added": [
" /**",
" * <p>",
" * Point the column references in the matching refinement at the corresponding",
" * columns returned by the driving left join.",
" * </p>",
" */",
" private void adjustMatchingRefinement",
" (",
" ResultColumnList selectList,",
" ResultSetNode generatedScan",
" )",
" throws StandardException",
" {",
" if ( _matchingRefinement != null )",
" {",
" useGeneratedScan( selectList, generatedScan, _matchingRefinement );",
" }",
" }",
" ",
" private void adjustThenColumns"
],
"header": "@@ -1395,13 +1391,32 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" void adjustThenColumns"
]
},
{
"added": [
" useGeneratedScan( selectList, generatedScan, _thenColumns );"
],
"header": "@@ -1410,15 +1425,8 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" CollectNodesVisitor<ColumnReference> getCRs =",
" new CollectNodesVisitor<ColumnReference>( ColumnReference.class );",
" _thenColumns.accept( getCRs );",
" for ( ColumnReference cr : getCRs.getList() )",
" {",
" ResultColumn leftJoinRC = leftJoinResult.elementAt( getSelectListOffset( selectList, cr ) - 1 );",
" cr.setSource( leftJoinRC );",
" }"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [],
"header": "@@ -238,13 +238,6 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
"",
" // re-bind the matchingRefinement clauses now that we have result set numbers",
" // from the driving left join.",
" for ( MatchingClauseNode mcn : _matchingClauses )",
" {",
" mcn.bindResultSetNumbers( this, _leftJoinFromList );",
" }"
]
}
]
}
] |
derby-DERBY-3155-bff25717
|
DERBY-3155: Fix some serialization bugs in the MERGE ConstantActions and add test cases for using trigger transition tables as the source tables of MERGE statements; check in derby-3155-06-aa-triggerTransitionTableAsSource.diff.
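The serialization fix in the hunk below swaps readUTF for readObject; the point, sketched standalone (class and method names hypothetical), is that writeUTF/readUTF cannot round-trip a null, while writeObject/readObject can:
    import java.io.IOException;
    import java.io.ObjectInput;
    import java.io.ObjectOutput;

    class NullableStringSketch {
        // An optional field such as the matching refinement's method name may be
        // null; out.writeUTF(null) would fail, but writeObject handles it.
        static void write(ObjectOutput out, String maybeNull) throws IOException {
            out.writeObject(maybeNull);
        }
        static String read(ObjectInput in) throws IOException, ClassNotFoundException {
            return (String) in.readObject(); // may legitimately come back null
        }
    }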
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1535447 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MatchingClauseConstantAction.java",
"hunks": [
{
"added": [
" /** 0-arg constructor needed by Formatable machinery */",
" public MatchingClauseConstantAction() {}",
""
],
"header": "@@ -89,6 +89,9 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": []
},
{
"added": [
" _matchRefinementName = (String) in.readObject();",
" _resultSetFieldName = (String) in.readObject();",
" _actionMethodName = (String) in.readObject();"
],
"header": "@@ -277,10 +280,10 @@ public class MatchingClauseConstantAction implements ConstantAction, Formatable",
"removed": [
" _matchRefinementName = in.readUTF();",
" _resultSetFieldName = in.readUTF();",
" _actionMethodName = in.readUTF();"
]
}
]
}
] |
derby-DERBY-3155-c2a0bdd9
|
DERBY-3155: Basic support for views as the source tables of MERGE statements; commit derby-3155-18-aa-basicView.diff.
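A hedged sketch, assuming a hypothetical target table t(id, val) and a base table raw(id, val) behind the view:
    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    class MergeViewSourceSketch {
        static void demo(Connection conn) throws SQLException {
            Statement st = conn.createStatement();
            st.executeUpdate("CREATE VIEW v AS SELECT id, val FROM raw WHERE val > 0");
            // the view v, not a base table, drives the merge
            st.executeUpdate("MERGE INTO t USING v ON t.id = v.id "
                + "WHEN NOT MATCHED THEN INSERT VALUES (v.id, v.val)");
            st.close();
        }
    }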
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1560507 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-daffaee4
|
DERBY-3155: Enforce correct privileges for DELETE actions of MERGE statements; tests passed cleanly on derby-3155-31-aa-deletePrivs.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1572665 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/DeleteNode.java",
"hunks": [
{
"added": [
" getCompilerContext().removePrivilegeFilter( scopeFilter );"
],
"header": "@@ -297,11 +297,7 @@ class DeleteNode extends DMLModStatementNode",
"removed": [
"",
" //",
" // Don't remove the WHERE scopeFilter. Pre-processing may try to",
" // add other privileges which we don't need.",
" //"
]
},
{
"added": [
" // Don't add any more permissions during pre-processing",
" IgnoreFilter ignorePermissions = new IgnoreFilter();",
" getCompilerContext().addPrivilegeFilter( ignorePermissions );",
" "
],
"header": "@@ -880,6 +876,10 @@ class DeleteNode extends DMLModStatementNode",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.compile.IgnoreFilter;"
],
"header": "@@ -39,6 +39,7 @@ import org.apache.derby.shared.common.sanity.SanityManager;",
"removed": []
},
{
"added": [
" List<ColumnReference> colRefs = getColumnReferences( _updateColumns );"
],
"header": "@@ -431,10 +432,7 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" CollectNodesVisitor<ColumnReference> getCRs =",
" new CollectNodesVisitor<ColumnReference>(ColumnReference.class);",
" _updateColumns.accept(getCRs);",
" List<ColumnReference> colRefs = getCRs.getList();"
]
},
{
"added": [
" //",
" // Don't add any privileges until we bind the DELETE.",
" //",
" IgnoreFilter ignorePermissions = new IgnoreFilter();",
" getCompilerContext().addPrivilegeFilter( ignorePermissions );",
" "
],
"header": "@@ -699,6 +697,12 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" // ready to add permissions",
" getCompilerContext().removePrivilegeFilter( ignorePermissions );",
""
],
"header": "@@ -722,6 +726,9 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
" List<ColumnReference> colRefs = getColumnReferences( checkConstraints );"
],
"header": "@@ -1345,17 +1352,13 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" CollectNodesVisitor<ColumnReference> getCRs =",
" new CollectNodesVisitor<ColumnReference>(ColumnReference.class);",
"",
" checkConstraints.accept(getCRs);",
" List<ColumnReference> colRefs = getCRs.getList();"
]
},
{
"added": [
" for ( ColumnReference cr : getColumnReferences( node ) )"
],
"header": "@@ -1498,11 +1501,8 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": [
" CollectNodesVisitor<ColumnReference> getCRs =",
" new CollectNodesVisitor<ColumnReference>( ColumnReference.class );",
" node.accept( getCRs );",
" for ( ColumnReference cr : getCRs.getList() )"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.sql.compile.IgnoreFilter;",
"import org.apache.derby.iapi.sql.conn.Authorizer;"
],
"header": "@@ -33,7 +33,9 @@ import org.apache.derby.iapi.services.classfile.VMOpcode;",
"removed": []
},
{
"added": [
" //",
" // Don't add any privileges until we bind the matching clauses.",
" //",
" IgnoreFilter ignorePermissions = new IgnoreFilter();",
" getCompilerContext().addPrivilegeFilter( ignorePermissions );",
" "
],
"header": "@@ -220,6 +222,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" // ready to add permissions",
" getCompilerContext().removePrivilegeFilter( ignorePermissions );",
""
],
"header": "@@ -230,6 +238,9 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" //",
" // Don't add any privileges while binding the tables.",
" //",
" IgnoreFilter ignorePermissions = new IgnoreFilter();",
" getCompilerContext().addPrivilegeFilter( ignorePermissions );",
" ",
" // ready to add permissions",
" getCompilerContext().removePrivilegeFilter( ignorePermissions );",
" "
],
"header": "@@ -261,8 +272,17 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
"",
" //",
" // Don't add any privileges until we bind the matching refinement clauses.",
" //",
" IgnoreFilter ignorePermissions = new IgnoreFilter();",
" getCompilerContext().addPrivilegeFilter( ignorePermissions );"
],
"header": "@@ -327,6 +347,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" // ready to add permissions",
" getCompilerContext().removePrivilegeFilter( ignorePermissions );",
""
],
"header": "@@ -350,6 +376,9 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" ",
" //",
" // We're only interested in privileges related to the ON clause.",
" // Otherwise, the driving left join should not contribute any",
" // privilege requirements.",
" //",
" getCompilerContext().addPrivilegeFilter( ignorePermissions );",
"",
" ",
" // ready to add permissions again",
" getCompilerContext().removePrivilegeFilter( ignorePermissions );",
"",
" // now figure out what privileges are needed for the ON clause",
" addOnClausePrivileges();"
],
"header": "@@ -399,7 +428,21 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" /**",
" * <p>",
" * Add the privileges required by the ON clause.",
" * </p>",
" */",
" private void addOnClausePrivileges() throws StandardException",
" {",
" // now add USAGE priv on referenced types",
" addUDTUsagePriv( getValueNodes( _searchCondition ) );",
"",
" // add SELECT privilege on columns",
" for ( ColumnReference cr : getColumnReferences( _searchCondition ) )",
" {",
" addColumnPrivilege( cr );",
" }",
" ",
" // add EXECUTE privilege on routines",
" for ( StaticMethodCallNode routine : getRoutineReferences( _searchCondition ) )",
" {",
" addRoutinePrivilege( routine );",
" }",
" }",
"",
" /**",
" * <p>",
" * Add SELECT privilege on the indicated column.",
" * </p>",
" */",
" private void addColumnPrivilege( ColumnReference cr )",
" throws StandardException",
" {",
" CompilerContext cc = getCompilerContext();",
" ResultColumn rc = cr.getSource();",
" ",
" if ( rc != null )",
" {",
" ColumnDescriptor colDesc = rc.getColumnDescriptor();",
" ",
" if ( colDesc != null )",
" {",
" cc.pushCurrentPrivType( Authorizer.SELECT_PRIV );",
" cc.addRequiredColumnPriv( colDesc );",
" cc.popCurrentPrivType();",
" }",
" }",
" }",
"",
" /**",
" * <p>",
" * Add EXECUTE privilege on the indicated routine.",
" * </p>",
" */",
" private void addRoutinePrivilege( StaticMethodCallNode routine )",
" throws StandardException",
" {",
" CompilerContext cc = getCompilerContext();",
" ",
" cc.pushCurrentPrivType( Authorizer.EXECUTE_PRIV );",
" cc.addRequiredRoutinePriv( routine.ad );",
" cc.popCurrentPrivType();",
" }",
""
],
"header": "@@ -548,6 +591,68 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
},
{
"added": [
" List<ColumnReference> colRefs = getColumnReferences( expression );",
"",
" getColumnsFromList( map, colRefs, mergeTableID );",
" }",
"",
" /** Get a list of ValueNodes in an expression */",
" private List<ValueNode> getValueNodes( QueryTreeNode expression )",
" throws StandardException",
" {",
" CollectNodesVisitor<ValueNode> getVNs =",
" new CollectNodesVisitor<ValueNode>(ValueNode.class);",
"",
" expression.accept(getVNs);",
" ",
" return getVNs.getList();",
" }",
"",
" /** Get a list of routines in an expression */",
" private List<StaticMethodCallNode> getRoutineReferences( QueryTreeNode expression )",
" throws StandardException",
" {",
" CollectNodesVisitor<StaticMethodCallNode> getSMCNs =",
" new CollectNodesVisitor<StaticMethodCallNode>(StaticMethodCallNode.class);",
"",
" expression.accept(getSMCNs);",
" ",
" return getSMCNs.getList();",
" }",
"",
" /** Get a list of column references in an expression */",
" private List<ColumnReference> getColumnReferences( QueryTreeNode expression )",
" throws StandardException",
" {",
" ",
" return getCRs.getList();"
],
"header": "@@ -599,13 +704,45 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" List<ColumnReference> colRefs = getCRs.getList();",
"",
" getColumnsFromList( map, colRefs, mergeTableID );"
]
},
{
"added": [
" List<ColumnReference> colRefs = getColumnReferences( rcl );"
],
"header": "@@ -613,11 +750,7 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" CollectNodesVisitor<ColumnReference> getCRs =",
" new CollectNodesVisitor<ColumnReference>( ColumnReference.class );",
"",
" rcl.accept( getCRs );",
" List<ColumnReference> colRefs = getCRs.getList();"
]
},
{
"added": [
" TableName tableName = cr.getQualifiedTableName();",
" if ( tableName == null ) { tableName = new TableName( null, rc.getTableName(), getContextManager() ); }"
],
"header": "@@ -645,7 +778,8 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" TableName tableName = new TableName( null, rc.getTableName(), getContextManager() );"
]
},
{
"added": [
" cc.setReliability( previousReliability | CompilerContext.SQL_IN_ROUTINES_ILLEGAL );",
" cc.pushCurrentPrivType( Authorizer.SELECT_PRIV );",
" try {",
" // this adds SELECT priv on referenced columns and EXECUTE privs on referenced routines",
"",
" // now add USAGE priv on referenced types",
" addUDTUsagePriv( getValueNodes( value ) );",
" cc.popCurrentPrivType();"
],
"header": "@@ -703,19 +837,25 @@ public final class MergeNode extends DMLModStatementNode",
"removed": [
" try {",
" cc.setReliability( previousReliability | CompilerContext.SQL_IN_ROUTINES_ILLEGAL );"
]
},
{
"added": [
" //",
" // Don't add any privileges during optimization.",
" //",
" IgnoreFilter ignorePermissions = new IgnoreFilter();",
" getCompilerContext().addPrivilegeFilter( ignorePermissions );",
" "
],
"header": "@@ -729,6 +869,12 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/UpdateNode.java",
"hunks": [
{
"added": [
" //",
" // Add USAGE privilege for all UDTs mentioned in the WHERE clause and",
" // on the right side of SET operators.",
" //"
],
"header": "@@ -639,6 +639,10 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": []
}
]
}
] |
derby-DERBY-3155-ddfefebc
|
DERBY-3155: Implement printSubNodes() for MergeNode and MatchingClauseNode; commit derby-3155-37-aa-printSubNodes.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1575032 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MergeNode.java",
"hunks": [
{
"added": [
"\t/**",
"\t * Prints the sub-nodes of this object. See QueryTreeNode.java for",
"\t * how tree printing is supposed to work.",
"\t *",
"\t * @param depth\t\tThe depth of this node in the tree",
"\t */",
" @Override",
" void printSubNodes( int depth )",
"\t{",
"\t\tif (SanityManager.DEBUG)",
"\t\t{",
"\t\t\tsuper.printSubNodes( depth );",
"",
" printLabel( depth, \"targetTable: \" );",
" _targetTable.treePrint( depth + 1 );",
"",
" printLabel( depth, \"sourceTable: \" );",
" _sourceTable.treePrint( depth + 1 );",
"",
" if ( _searchCondition != null )",
" {",
" printLabel( depth, \"searchCondition: \" );",
" _searchCondition.treePrint( depth + 1 );",
" }",
"",
" for ( MatchingClauseNode mcn : _matchingClauses )",
" {",
" printLabel( depth, mcn.toString() );",
" mcn.treePrint( depth + 1 );",
" }",
"\t\t}",
"\t}",
""
],
"header": "@@ -972,6 +972,39 @@ public final class MergeNode extends DMLModStatementNode",
"removed": []
}
]
}
] |
derby-DERBY-3155-efa1efa9
|
DERBY-3155: Prevent a MERGE statement from altering the same target row twice; commit derby-3155-28-aa-cardinalityViolations.diff.
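A sketch of the cardinality violation now caught, assuming hypothetical tables t(id, val) and s(id, val) where s holds two rows whose id matches a single row of t:
    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    class MergeCardinalitySketch {
        static void demo(Connection conn) throws SQLException {
            Statement st = conn.createStatement();
            try {
                // both source rows join to the same target row, so that row
                // would be updated twice in one MERGE
                st.executeUpdate("MERGE INTO t USING s ON t.id = s.id "
                    + "WHEN MATCHED THEN UPDATE SET val = s.val");
            } catch (SQLException expected) {
                // after this patch the statement fails instead of silently
                // applying both updates
            }
            st.close();
        }
    }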
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1570230 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MergeResultSet.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.store.access.BackingStoreHashtable;",
"import org.apache.derby.shared.common.sanity.SanityManager;"
],
"header": "@@ -25,15 +25,16 @@ import org.apache.derby.iapi.error.StandardException;",
"removed": [
"import org.apache.derby.shared.common.sanity.SanityManager;"
]
},
{
"added": [
"\tprivate BackingStoreHashtable\t\t_subjectRowIDs;",
" ",
"\tprivate int\t\t\t\t\t\t_numOpens;"
],
"header": "@@ -61,7 +62,9 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": [
"\tprivate int\t\t\t\t\t\tnumOpens;"
]
},
{
"added": [
"\t\tif (_numOpens++ == 0)"
],
"header": "@@ -98,7 +101,7 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": [
"\t\tif (numOpens++ == 0)"
]
},
{
"added": [
"",
" if ( _subjectRowIDs != null )",
" {",
" _subjectRowIDs.close();",
" _subjectRowIDs = null;",
" }",
" ",
"\t\t_numOpens = 0;"
],
"header": "@@ -162,7 +165,14 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": [
"\t\tnumOpens = 0;"
]
},
{
"added": [],
"header": "@@ -180,7 +190,6 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": [
" RowLocation baseRowLocation;"
]
},
{
"added": [
" SQLRef baseRowLocation = null;"
],
"header": "@@ -196,7 +205,7 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": [
" baseRowLocation = null;"
]
},
{
"added": [
" baseRowLocation = new SQLRef( (RowLocation) rlColumn.getObject() );",
" _row.setColumn( _row.nColumns(), baseRowLocation );"
],
"header": "@@ -204,11 +213,11 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": [
" baseRowLocation = (RowLocation) rlColumn.getObject();",
" _row.setColumn( _row.nColumns(), new SQLRef( baseRowLocation ) );"
]
},
{
"added": [
" if ( baseRowLocation != null ) { addSubjectRow( baseRowLocation ); }",
" "
],
"header": "@@ -243,6 +252,8 @@ class MergeResultSet extends NoRowsResultSetImpl",
"removed": []
}
]
}
] |
derby-DERBY-3155-f2cad07d
|
DERBY-3155: Add test to verify use of identifiers on the left side of SET clauses in MERGE statements; commit derby-3155-22-ad-testIdentifiersOnLeftSideOfSetClauses.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1566625 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3155-ffbe8135
|
DERBY-3155: Add privilege checks for the UPDATE actions of MERGE statements; commit derby-3155-34-ab-updatePrivs.diff.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1574566 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/MatchingClauseNode.java",
"hunks": [
{
"added": [
" //",
" // Don't add USAGE privilege on user-defined types.",
" //",
" boolean wasSkippingTypePrivileges = getCompilerContext().skipTypePrivileges( true );",
" "
],
"header": "@@ -313,6 +313,11 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
},
{
"added": [
"",
" getCompilerContext().skipTypePrivileges( wasSkippingTypePrivileges );"
],
"header": "@@ -336,6 +341,8 @@ public class MatchingClauseNode extends QueryTreeNode",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/UpdateNode.java",
"hunks": [
{
"added": [
" List<CastNode> allCastNodes = collectAllCastNodes();"
],
"header": "@@ -203,7 +203,7 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": [
" List<ValueNode> allValueNodes = collectAllValueNodes();"
]
},
{
"added": [
" getCompilerContext().removePrivilegeFilter( tagFilter );",
" // Add USAGE privilege for all CASTs to UDTs mentioned in the WHERE clause and",
" for ( CastNode value : allCastNodes )",
" {",
" addUDTUsagePriv( value );",
" }"
],
"header": "@@ -636,15 +636,16 @@ public final class UpdateNode extends DMLModStatementNode",
"removed": [
" // don't remove the privilege filter. additional binding may be",
" // done during the pre-processing phase",
" // Add USAGE privilege for all UDTs mentioned in the WHERE clause and",
" addUDTUsagePriv( allValueNodes );",
""
]
}
]
}
] |
derby-DERBY-3158-ce018f39
|
DERBY-3158 DERBY-3159 Ensure that SYSCS_SET_USER_ACCESS: 1) always sets the user name in the lists in its delimited form; 2) allows repeated calls to set the permission for the same user by always clearing both lists of that user name before adding; 3) allows user names with characters that require quoting in SQL by not validating the name as a SQL identifier, since the passed-in user name is a normalized user name.
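A hedged JDBC sketch of the repaired behavior (the class and argument names are hypothetical; the procedure itself is the real SYSCS_UTIL.SYSCS_SET_USER_ACCESS):
    import java.sql.CallableStatement;
    import java.sql.Connection;
    import java.sql.SQLException;

    class UserAccessSketch {
        // The caller passes a normalized user name; a name that would need
        // quoting as a SQL identifier (e.g. one containing a space) is accepted,
        // and calling again for the same user just moves it between the lists.
        static void grantFullAccess(Connection conn, String normalizedName)
                throws SQLException {
            CallableStatement cs = conn.prepareCall(
                "CALL SYSCS_UTIL.SYSCS_SET_USER_ACCESS(?, ?)");
            cs.setString(1, normalizedName); // e.g. "odd user"
            cs.setString(2, "FULLACCESS");
            cs.execute();
            cs.close();
        }
    }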
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@589894 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/util/IdUtil.java",
"hunks": [
{
"added": [
" * Produce a delimited form of a normal value.",
"\tpublic static String normalToDelimited(String id)",
" ",
"\t\tfor (int ix = 0; ix < id.length(); ix++){",
"\t\t\tchar currentChar = id.charAt(ix);"
],
"header": "@@ -58,17 +58,16 @@ import java.util.Properties;",
"removed": [
"\t Delimit the identifier provided.",
"\tprivate static String delimitId(String id)",
"\t char[] charArray = id.toCharArray();",
"",
"\t\tfor (int ix = 0; ix < charArray.length; ix++){",
"\t\t\tchar currentChar = charArray[ix];"
]
},
{
"added": [
" return normalToDelimited(id2);",
" normalToDelimited(id1) +",
" normalToDelimited(id2);"
],
"header": "@@ -87,11 +86,11 @@ public abstract class IdUtil",
"removed": [
" return delimitId(id2);",
"\t\t\tdelimitId(id1) +",
"\t\t\tdelimitId(id2);"
]
},
{
"added": [
"\t\t\tsb.append(normalToDelimited(ids[ix]));"
],
"header": "@@ -103,7 +102,7 @@ public abstract class IdUtil",
"removed": [
"\t\t\tsb.append(delimitId(ids[ix]));"
]
},
{
"added": [
"\t\t\treturn normalToDelimited(b.toString()); //Put the quotes back."
],
"header": "@@ -303,7 +302,7 @@ c",
"removed": [
"\t\t\treturn delimitId(b.toString()); //Put the quotes back."
]
},
{
"added": [
"\t\t\tsb.append(IdUtil.normalToDelimited(ids[ix]));"
],
"header": "@@ -581,7 +580,7 @@ c",
"removed": [
"\t\t\tsb.append(IdUtil.delimitId(ids[ix]));"
]
}
]
}
] |
derby-DERBY-3159-ce018f39
|
DERBY-3158 DERBY-3159 Ensure that SYSCS_SET_USER_ACCESS: 1) always sets the user name in the lists in its delimited form; 2) allows repeated calls to set the permission for the same user by always clearing both lists of that user name before adding; 3) allows user names with characters that require quoting in SQL by not validating the name as a SQL identifier, since the passed-in user name is a normalized user name.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@589894 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/util/IdUtil.java",
"hunks": [
{
"added": [
" * Produce a delimited form of a normal value.",
"\tpublic static String normalToDelimited(String id)",
" ",
"\t\tfor (int ix = 0; ix < id.length(); ix++){",
"\t\t\tchar currentChar = id.charAt(ix);"
],
"header": "@@ -58,17 +58,16 @@ import java.util.Properties;",
"removed": [
"\t Delimit the identifier provided.",
"\tprivate static String delimitId(String id)",
"\t char[] charArray = id.toCharArray();",
"",
"\t\tfor (int ix = 0; ix < charArray.length; ix++){",
"\t\t\tchar currentChar = charArray[ix];"
]
},
{
"added": [
" return normalToDelimited(id2);",
" normalToDelimited(id1) +",
" normalToDelimited(id2);"
],
"header": "@@ -87,11 +86,11 @@ public abstract class IdUtil",
"removed": [
" return delimitId(id2);",
"\t\t\tdelimitId(id1) +",
"\t\t\tdelimitId(id2);"
]
},
{
"added": [
"\t\t\tsb.append(normalToDelimited(ids[ix]));"
],
"header": "@@ -103,7 +102,7 @@ public abstract class IdUtil",
"removed": [
"\t\t\tsb.append(delimitId(ids[ix]));"
]
},
{
"added": [
"\t\t\treturn normalToDelimited(b.toString()); //Put the quotes back."
],
"header": "@@ -303,7 +302,7 @@ c",
"removed": [
"\t\t\treturn delimitId(b.toString()); //Put the quotes back."
]
},
{
"added": [
"\t\t\tsb.append(IdUtil.normalToDelimited(ids[ix]));"
],
"header": "@@ -581,7 +580,7 @@ c",
"removed": [
"\t\t\tsb.append(IdUtil.delimitId(ids[ix]));"
]
}
]
}
] |
derby-DERBY-3160-bbba1c74
|
DERBY-3160 Make SYSCS_GET_USER_ACCESS treat the passed-in user name as being in its normal form, as would be passed, for example, by using the expression CURRENT_USER
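A hedged sketch (the class and method names are hypothetical) of querying the permission for the current user; CURRENT_USER evaluates to exactly the normalized form the function now expects:
    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    class GetUserAccessSketch {
        static String accessForCurrentUser(Connection conn) throws SQLException {
            Statement st = conn.createStatement();
            ResultSet rs = st.executeQuery(
                "VALUES SYSCS_UTIL.SYSCS_GET_USER_ACCESS(CURRENT_USER)");
            rs.next();
            String access = rs.getString(1); // e.g. FULLACCESS or READONLYACCESS
            rs.close();
            st.close();
            return access;
        }
    }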
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@590720 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3172-836fc7bd
|
This is a followup to checkin 595047, which was committed for DERBY-3172. The DataSourceTest had started failing
under JDK1.6 after 595047. The particular test case that was failing was for Connection.getTypeMap. The reason for
the failure was that this method was overridden in a subclass which kicks in only when JDBC4.0 is available. The
overridden method was not sending the correct connection error event as expected by the test, hence the failure.
While fixing this, I realized that there are several new JDBC4.0 apis that need to send the correct events to
ConnectionEventListeners. This checkin takes care of those apis. More info on what was changed in this commit is
as follows.
The new JDBC4.0 api, setClientInfo, wraps SQLException inside SQLClientInfoException, but we were not copying the
error code of the SQLException into the SQLClientInfoException. Without the correct error code, we would not send
a connection error event, because the severity has to be fatal for us to send one. Because of this,
I had to change several places where SQLException is wrapped inside SQLClientInfoException to pass the SQLException's
error code to the SQLClientInfoException. The classes changed because of this are EmbedConnection40,
BrokeredConnection40, and NetConnection40.
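The essence of that fix, sketched standalone (the helper class and method names are hypothetical): use the four-argument SQLClientInfoException constructor so the vendor error code survives the wrapping:
    import java.sql.ClientInfoStatus;
    import java.sql.SQLClientInfoException;
    import java.sql.SQLException;
    import java.util.Map;

    class ClientInfoWrapSketch {
        static SQLClientInfoException wrap(SQLException se,
                Map<String, ClientInfoStatus> failedProps) {
            return new SQLClientInfoException(
                se.getMessage(), se.getSQLState(),
                se.getErrorCode(), // the piece that was being dropped
                failedProps);
        }
    }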
For methods that throw SQLClientInfoException, we were not sending the connection error events. I made changes to
fix this.
Several of the new JDBC4 apis on the Connection object were not sending error events, so I changed those methods in
BrokeredConnection40 and LogicalConnection40.
BrokeredConnection40 implements the new JDBC4 methods on the Connection object, but these new methods did not follow
the same logic as the existing pre-JDBC4 methods for checking whether the connection is closed, and that caused the
events not to be sent correctly. The problematic apis were createBlob, createClob, isWrapperFor, and unwrap, and I fixed those.
Not all the new JDBC4 apis have been implemented (they throw a not-implemented exception), so the tests written for
those apis just catch the unimplemented exception. These methods include createArrayOf, createNClob, createSQLXML,
and createStruct.
In JDBC4, the Connection object has two methods, isWrapperFor and unwrap, which do not go to the server when Derby is
being accessed in client server mode. Because of this, we never detect that the server is down, and hence no
connection error event is thrown in client server mode for these 2 apis. But when the same apis are called in
embedded Derby after the engine is shut down, we get a connection error event. I have added tests for these 2 apis
to account for the difference in behavior, but I am not sure if this is the expected behavior difference between the
2 configurations of Derby. I will enter a Jira entry for this.
And lastly, the new JDBC4 api isValid on the Connection object has different behavior in client server mode and
embedded mode. Both throw an exception saying that the connection is down, but the connection close and error events
are not handled the same way in the 2 configurations. In embedded mode, after the engine is shut down, an isValid
call on the Connection object raises a connection closed event and no connection error event. In client server mode,
after the Network Server is shut down, an isValid call on the Connection object does not raise any event. In both
configurations, we do get a SQLException stating that the connection is down. Again, I am not sure if this is an
expected behavior difference between the 2 configurations of Derby. I will enter a Jira entry for this too. In
addition, the Java specification for the Connection.isValid api says a SQLException is thrown under the following
condition, which is not being followed in embedded and client-server mode:
Throws:
SQLException - if the value supplied for timeout is less than 0
Based on this, I am not sure our behavior is correct in throwing an SQLException if the server/engine is down. I
will include this information in the Jira entry that I will make.
The tests for all these new JDBC4 apis are in jdbc4/DataSourceTest.
I moved the AssertEventCatcher class implementation from jdbcapi/DataSourceTest into a class of its own. This way,
it can be shared by jdbcapi/DataSourceTest and jdbc4/DataSourceTest.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@595803 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/am/LogicalConnection40.java",
"hunks": [
{
"added": [
" try",
" {",
" \t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createArrayOf( typeName, elements );",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}",
" try",
" {",
" \t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createBlob();",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}",
" try",
" {",
" \t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createClob();",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}",
" try",
" {",
" \t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createNClob();",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}",
" try",
" {",
" \t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createSQLXML();",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}",
" try",
" {",
" \t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createStruct( typeName, attributes );",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}"
],
"header": "@@ -54,38 +54,74 @@ public class LogicalConnection40",
"removed": [
"\t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createArrayOf( typeName, elements );",
"\t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createBlob();",
"\t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createClob();",
"\t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createNClob();",
"\t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createSQLXML();",
"\t\tcheckForNullPhysicalConnection();",
" return physicalConnection_.createStruct( typeName, attributes );"
]
},
{
"added": [
" try",
" {",
" \tcheckForNullPhysicalConnection();",
" \treturn physicalConnection_.getClientInfo();",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}"
],
"header": "@@ -100,8 +136,14 @@ public class LogicalConnection40",
"removed": [
"\tcheckForNullPhysicalConnection();",
"\treturn physicalConnection_.getClientInfo();"
]
},
{
"added": [
" try",
" {",
" \tcheckForNullPhysicalConnection();",
" \treturn physicalConnection_.getClientInfo(name);",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}"
],
"header": "@@ -116,8 +158,14 @@ public class LogicalConnection40",
"removed": [
"\tcheckForNullPhysicalConnection();",
"\treturn physicalConnection_.getClientInfo(name);"
]
},
{
"added": [
" try",
" {",
" checkForNullPhysicalConnection();",
" return ((java.sql.Connection) physicalConnection_).getTypeMap();",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}"
],
"header": "@@ -127,8 +175,14 @@ public class LogicalConnection40",
"removed": [
" checkForNullPhysicalConnection();",
" return ((java.sql.Connection) physicalConnection_).getTypeMap();"
]
},
{
"added": [
" try",
" {",
" // Check if we have a underlying physical connection",
" if (physicalConnection_ == null) {",
" return false;",
" }",
" return physicalConnection_.isValid(timeout);",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}",
" try",
" {",
" checkForNullPhysicalConnection();",
" return interfaces.isInstance(this);",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}"
],
"header": "@@ -146,18 +200,30 @@ public class LogicalConnection40",
"removed": [
" // Check if we have a underlying physical connection",
" if (physicalConnection_ == null) {",
" return false;",
" }",
" return physicalConnection_.isValid(timeout);",
" checkForNullPhysicalConnection();",
" return interfaces.isInstance(this);"
]
},
{
"added": [
" try",
" {",
" \t\tcheckForNullPhysicalConnection(); ",
" \tphysicalConnection_.setClientInfo(properties);",
" } catch (SQLClientInfoException cie) {",
" notifyException(cie);",
" throw cie;",
" \t} catch (SQLException sqle) {",
" \t\tnotifyException(sqle);",
" \t throw new SQLClientInfoException",
" \t\t(sqle.getMessage(), sqle.getSQLState(), ",
" \t\t\t\tsqle.getErrorCode(),",
" \t\t\t\t(new FailedProperties40(properties)).getProperties());",
" \t}"
],
"header": "@@ -170,13 +236,20 @@ public class LogicalConnection40",
"removed": [
"\ttry { checkForNullPhysicalConnection(); }",
"\tcatch (SQLException se) { ",
"\t throw new SQLClientInfoException",
"\t\t(se.getMessage(), se.getSQLState(), ",
"\t\t (new FailedProperties40(properties)).getProperties());",
"\t}",
"\tphysicalConnection_.setClientInfo(properties);"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetConnection40.java",
"hunks": [
{
"added": [
" (se.getMessage(), se.getSQLState(),",
" \t\tse.getErrorCode(),",
" \t\tnew FailedProperties40(p).getProperties());"
],
"header": "@@ -244,8 +244,9 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection",
"removed": [
" (se.getMessage(), se.getSQLState(), ",
" new FailedProperties40(p).getProperties());"
]
},
{
"added": [
"\t \t\tse.getErrorCode(),",
"\t \t\tfp.getProperties());"
],
"header": "@@ -274,7 +275,8 @@ public class NetConnection40 extends org.apache.derby.client.net.NetConnection",
"removed": [
"\t\t\t\t\t fp.getProperties());"
]
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/jdbc/BrokeredConnection40.java",
"hunks": [
{
"added": [],
"header": "@@ -67,9 +67,6 @@ public class BrokeredConnection40 extends BrokeredConnection30 {",
"removed": [
" if (isClosed()) {",
" throw Util.noCurrentConnection();",
" }"
]
},
{
"added": [],
"header": "@@ -93,9 +90,6 @@ public class BrokeredConnection40 extends BrokeredConnection30 {",
"removed": [
" if (isClosed()) {",
" throw Util.noCurrentConnection();",
" }"
]
},
{
"added": [
" notifyException(se);",
" \t\tse.getErrorCode(),"
],
"header": "@@ -182,8 +176,10 @@ public class BrokeredConnection40 extends BrokeredConnection30 {",
"removed": []
},
{
"added": [
" notifyException(se);",
" (se.getMessage(), se.getSQLState(),",
" \t\tse.getErrorCode(),"
],
"header": "@@ -210,8 +206,10 @@ public class BrokeredConnection40 extends BrokeredConnection30 {",
"removed": [
" (se.getMessage(), se.getSQLState(), "
]
},
{
"added": [
" try {",
" if (getRealConnection().isClosed())",
" throw Util.noCurrentConnection();",
" return interfaces.isInstance(this);",
" } catch (SQLException sqle) {",
" notifyException(sqle);",
" throw sqle;",
" }"
],
"header": "@@ -311,8 +309,14 @@ public class BrokeredConnection40 extends BrokeredConnection30 {",
"removed": [
" checkIfClosed();",
" return interfaces.isInstance(this);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection40.java",
"hunks": [
{
"added": [
" se.getErrorCode(),"
],
"header": "@@ -120,6 +120,7 @@ public class EmbedConnection40 extends EmbedConnection30 {",
"removed": []
},
{
"added": [
" \t\tse.getErrorCode(), fp.getProperties());"
],
"header": "@@ -152,7 +153,7 @@ public class EmbedConnection40 extends EmbedConnection30 {",
"removed": [
" fp.getProperties());"
]
}
]
}
] |
derby-DERBY-3172-cb47a36e
|
DERBY-3172
Change the Network Server code so that when the Network Server is shut down, any api on the Connection object will
raise a connection error event (similar to what is done in embedded mode). This also required changing the
disconnectError from -4999 to 40000 (to match embedded). This change was made in client.am.SqlCode. I have added tests
for all the apis on the Connection object in DataSourceTest. I had to change a couple of tests which were testing for
-4999; now they look for 40000. I will merge this change into 10.3 and run the tests there before committing the
changes in the 10.3 codeline.
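A hedged sketch of the listener side (the class name is hypothetical); such a listener would be registered on a PooledConnection via addConnectionEventListener and, with this change, now fires in client server mode as well:
    import javax.sql.ConnectionEvent;
    import javax.sql.ConnectionEventListener;

    class ErrorEventSketch implements ConnectionEventListener {
        boolean gotError;
        public void connectionClosed(ConnectionEvent event) { }
        // calling any Connection api after the Network Server is shut down
        // should now end up here, as it already did in embedded mode
        public void connectionErrorOccurred(ConnectionEvent event) {
            gotError = true;
            System.out.println("fatal error, SQLState "
                + event.getSQLException().getSQLState());
        }
    }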
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@595047 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/ClientPooledConnection.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.error.ExceptionSeverity;"
],
"header": "@@ -23,8 +23,8 @@ package org.apache.derby.client;",
"removed": [
"import org.apache.derby.jdbc.ClientDataSource;"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/am/LogicalConnection.java",
"hunks": [
{
"added": [
" /**",
" * This method checks if the physcial connection underneath is null and",
" * if yes, then it simply returns.",
" * Otherwise, if the severity of exception is greater than equal to",
" * ExceptionSeverity.SESSION_SEVERITY, then we will send ",
" * connectionErrorOccurred event to all the registered listeners.",
" * ",
" * @param sqle SQLException An event will be sent to the listeners if the",
" * exception's severity is >= ExceptionSeverity.SESSION_SEVERITY.",
" */",
"\tfinal void notifyException(SQLException sqle) {",
" if (physicalConnection_ != null) ",
" \tpooledConnection_.trashConnection(new SqlException(sqle));",
"\t}",
"",
"\t\ttry {",
"\t checkForNullPhysicalConnection();",
"\t return physicalConnection_.createStatement();",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}",
"\t\ttry {",
"\t checkForNullPhysicalConnection();",
"\t return physicalConnection_.prepareStatement(sql);",
"\t\t} catch (SQLException sqle) {",
"\t\t\tnotifyException(sqle);",
"\t\t\tthrow sqle;",
"\t\t}"
],
"header": "@@ -130,17 +130,42 @@ public class LogicalConnection implements java.sql.Connection {",
"removed": [
" checkForNullPhysicalConnection();",
" return physicalConnection_.createStatement();",
" checkForNullPhysicalConnection();",
" return physicalConnection_.prepareStatement(sql);"
]
}
]
}
] |
derby-DERBY-3173-320f6afa
|
DERBY-3173: Removed cached String objects from SQLTime and SQLTimestamp
Patch contributed by Eranda Sooriyabandara <070468D at gmail dot com>.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@989918 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/DateTimeParser.java",
"hunks": [
{
"added": [],
"header": "@@ -31,7 +31,6 @@ class DateTimeParser",
"removed": [
" private String trimmedString;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/types/SQLTime.java",
"hunks": [
{
"added": [],
"header": "@@ -93,9 +93,6 @@ public final class SQLTime extends DataType",
"removed": [
"\t// The cached value.toString()",
"\tprivate String\tvalueString;",
""
]
},
{
"added": [
" return BASE_MEMORY_USAGE;",
"\t\t\treturn encodedTimeToString(encodedTime);"
],
"header": "@@ -105,30 +102,17 @@ public final class SQLTime extends DataType",
"removed": [
" return BASE_MEMORY_USAGE + ClassSize.estimateMemoryUsage( valueString);",
"\t\t\tif (valueString == null)",
"\t\t\t{",
"\t\t\t\tvalueString = encodedTimeToString(encodedTime);",
"\t\t\t}",
"\t\t\treturn valueString;",
"\t\t\tif (SanityManager.DEBUG)",
"\t\t\t{",
"\t\t\t\tif (valueString != null)",
"\t\t\t\t{",
"\t\t\t\t\tSanityManager.THROWASSERT(",
"\t\t\t\t\t\t\"valueString expected to be null, not \" +",
"\t\t\t\t\t\tvalueString);",
"\t\t\t\t}",
"\t\t\t}"
]
},
{
"added": [],
"header": "@@ -231,15 +215,11 @@ public final class SQLTime extends DataType",
"removed": [
"\t\t// reset cached values",
"\t\tvalueString = null;",
"\t\t// reset cached values",
"\t\tvalueString = null;"
]
},
{
"added": [],
"header": "@@ -269,9 +249,6 @@ public final class SQLTime extends DataType",
"removed": [
"",
"\t\t// clear cached valueString",
"\t\tvalueString = null;"
]
},
{
"added": [],
"header": "@@ -470,7 +447,6 @@ public final class SQLTime extends DataType",
"removed": [
" valueString = parser.getTrimmedString();"
]
},
{
"added": [
" parser.checkEnd();"
],
"header": "@@ -551,7 +527,7 @@ public final class SQLTime extends DataType",
"removed": [
" valueString = parser.checkEnd();"
]
}
]
},
{
"file": "java/engine/org/apache/derby/iapi/types/SQLTimestamp.java",
"hunks": [
{
"added": [],
"header": "@@ -93,10 +93,6 @@ public final class SQLTimestamp extends DataType",
"removed": [
"",
"\t// The cached value.toString()",
"\tprivate String\tvalueString;",
""
]
},
{
"added": [
" int sz = BASE_MEMORY_USAGE;"
],
"header": "@@ -106,7 +102,7 @@ public final class SQLTimestamp extends DataType",
"removed": [
" int sz = BASE_MEMORY_USAGE + ClassSize.estimateMemoryUsage( valueString);"
]
},
{
"added": [
" String valueString = getTimestamp((Calendar) null).toString();",
" /* The java.sql.Timestamp.toString() method is supposed to return a string in",
" * the JDBC escape format. However the JDK 1.3 libraries truncate leading zeros from",
" * the year. This is not acceptable to DB2. So add leading zeros if necessary.",
" */",
" int separatorIdx = valueString.indexOf('-');",
" if (separatorIdx >= 0 && separatorIdx < 4)",
" {",
" StringBuffer sb = new StringBuffer();",
" for( ; separatorIdx < 4; separatorIdx++)",
" sb.append('0');",
" sb.append(valueString);",
" valueString = sb.toString();",
" }"
],
"header": "@@ -114,37 +110,25 @@ public final class SQLTimestamp extends DataType",
"removed": [
"\t\t\tif (valueString == null)",
"\t\t\t{",
"\t\t\t\tvalueString = getTimestamp((Calendar) null).toString();",
" /* The java.sql.Timestamp.toString() method is supposed to return a string in",
" * the JDBC escape format. However the JDK 1.3 libraries truncate leading zeros from",
" * the year. This is not acceptable to DB2. So add leading zeros if necessary.",
" */",
" int separatorIdx = valueString.indexOf( '-');",
" if( separatorIdx >= 0 && separatorIdx < 4)",
" {",
" StringBuffer sb = new StringBuffer();",
" for( ; separatorIdx < 4; separatorIdx++)",
" sb.append('0');",
" sb.append( valueString);",
" valueString = sb.toString();",
" }",
"\t\t\t}",
"\t\t\tif (SanityManager.DEBUG)",
"\t\t\t{",
"\t\t\t\tif (valueString != null)",
"\t\t\t\t{",
"\t\t\t\t\tSanityManager.THROWASSERT(",
"\t\t\t\t\t\t\"valueString expected to be null, not \" +",
"\t\t\t\t\t\tvalueString);",
"\t\t\t\t}",
"\t\t\t}"
]
},
{
"added": [],
"header": "@@ -253,16 +237,12 @@ public final class SQLTimestamp extends DataType",
"removed": [
"\t\t// reset cached values",
"\t\tvalueString = null;",
"\t\t// reset cached values",
"\t\tvalueString = null;"
]
},
{
"added": [],
"header": "@@ -294,8 +274,6 @@ public final class SQLTimestamp extends DataType",
"removed": [
"\t\t// clear cached valueString",
"\t\tvalueString = null;"
]
}
]
}
] |
derby-DERBY-3173-eac24d28
|
DERBY-3173: Removed cached String objects from SQLDate
Patch contributed by Eranda Sooriyabandara <070468D@gmail.com>.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@988107 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/SQLDate.java",
"hunks": [
{
"added": [
" return BASE_MEMORY_USAGE;"
],
"header": "@@ -86,14 +86,11 @@ public final class SQLDate extends DataType",
"removed": [
"\t// The cached value.toString()",
"\tprivate String\tvalueString;",
"",
" return BASE_MEMORY_USAGE + ClassSize.estimateMemoryUsage( valueString);"
]
},
{
"added": [
"\t\t\treturn encodedDateToString(encodedDate);"
],
"header": "@@ -111,23 +108,10 @@ public final class SQLDate extends DataType",
"removed": [
"\t\t\tif (valueString == null)",
"\t\t\t{",
"\t\t\t\tvalueString = encodedDateToString(encodedDate);",
"\t\t\t}",
"\t\t\treturn valueString;",
"\t\t\tif (SanityManager.DEBUG)",
"\t\t\t{",
"\t\t\t\tif (valueString != null)",
"\t\t\t\t{",
"\t\t\t\t\tSanityManager.THROWASSERT(",
"\t\t\t\t\t\t\"valueString expected to be null, not \" +",
"\t\t\t\t\t\tvalueString);",
"\t\t\t\t}",
"\t\t\t}"
]
},
{
"added": [],
"header": "@@ -229,15 +213,11 @@ public final class SQLDate extends DataType",
"removed": [
"\t\t// reset cached string values",
"\t\tvalueString = null;",
"\t\t// reset cached string values",
"\t\tvalueString = null;"
]
},
{
"added": [],
"header": "@@ -268,8 +248,6 @@ public final class SQLDate extends DataType",
"removed": [
"\t\t// clear cached valueString",
"\t\tvalueString = null;"
]
}
]
}
] |
derby-DERBY-3175-45c3f035
|
DERBY-3175: NullPointerException on INSERT after ALTER TABLE DROP COLUMN
The implementation of ALTER TABLE DROP COLUMN needs to adjust the
SYSCOLUMNS information for some of the columns in the table. Specifically,
columns with a higher column position than the dropped column need to
have their column position adjusted (decremented by 1).
The algorithm which did this column position adjustment was flawed,
because it was losing the auto-increment information for generated columns.
The code adjusts the column position by:
- reading the old column information into a ColumnDescriptor
- deleting the old column information from SYSCOLUMNS
- modifying the ColumnDescriptor to set the new column position
- adding the new column information to SYSCOLUMNS
But the generated column information in the ColumnDescriptor was not being
preserved properly in this process.
The fix involved ensuring that the ColumnDescriptor information was
accurate for generated columns, and ensuring that SYSCOLUMNSRowFactory.makeRow
was correctly loading the generated column information into the new row.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@594727 13f79535-47bb-0310-9956-ffa450edef68
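In outline, the hazard is the classic read/delete/modify/re-add cycle losing a field along the way; a self-contained toy model (all names invented, not the AlterTableConstantAction code):

    import java.util.ArrayList;
    import java.util.List;

    public class DropColumnSketch {
        // Minimal stand-in for the SYSCOLUMNS information of one column.
        static class Column {
            final String name;
            final int position;        // 1-based column position
            final long autoincValue;   // the field the buggy code dropped
            Column(String name, int position, long autoincValue) {
                this.name = name;
                this.position = position;
                this.autoincValue = autoincValue;
            }
        }

        // Re-number the columns after dropping the column at droppedPos.
        static List<Column> dropColumn(List<Column> cols, int droppedPos) {
            List<Column> adjusted = new ArrayList<Column>();
            for (Column c : cols) {
                if (c.position == droppedPos) {
                    continue;                          // the dropped column goes away
                }
                int newPos = (c.position > droppedPos) ? c.position - 1 : c.position;
                // The fix: the rebuilt descriptor must carry autoincValue along,
                // not silently reset it to the autoincrement start value.
                adjusted.add(new Column(c.name, newPos, c.autoincValue));
            }
            return adjusted;
        }
    }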
|
[
{
"file": "java/engine/org/apache/derby/iapi/sql/dictionary/ColumnDescriptor.java",
"hunks": [
{
"added": [
"\tprivate long\t\t\t\tautoincValue;"
],
"header": "@@ -69,6 +69,7 @@ public final class ColumnDescriptor extends TupleDescriptor",
"removed": []
},
{
"added": [
"\t\tthis.autoincValue = autoincStart;"
],
"header": "@@ -154,6 +155,7 @@ public final class ColumnDescriptor extends TupleDescriptor",
"removed": []
},
{
"added": [
"\t * @param autoincValue\tCurrent value of the autoincrement column",
" long autoincStart, long autoincInc, long autoincValue)"
],
"header": "@@ -176,13 +178,14 @@ public final class ColumnDescriptor extends TupleDescriptor",
"removed": [
" long autoincStart, long autoincInc)"
]
},
{
"added": [
"\t\tthis.autoincValue = autoincValue;"
],
"header": "@@ -198,6 +201,7 @@ public final class ColumnDescriptor extends TupleDescriptor",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java",
"hunks": [
{
"added": [
" (UUID) null, 0, 0, 0);"
],
"header": "@@ -3678,7 +3678,7 @@ public final class\tDataDictionaryImpl",
"removed": [
" (UUID) null, 0, 0);"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/catalog/SYSCOLUMNSRowFactory.java",
"hunks": [
{
"added": [
"\t\tlong\t\t\t\t\tautoincValue = 0;"
],
"header": "@@ -154,6 +154,7 @@ public class SYSCOLUMNSRowFactory extends CatalogRowFactory",
"removed": []
},
{
"added": [
"\t\t\tautoincValue = column.getAutoincValue();"
],
"header": "@@ -174,6 +175,7 @@ public class SYSCOLUMNSRowFactory extends CatalogRowFactory",
"removed": []
},
{
"added": [
"\t\t\t// This code also gets run when ALTER TABLE DROP COLUMN",
"\t\t\t// is used to drop a column other than the autoinc",
"\t\t\t// column, and the autoinc column gets removed from",
"\t\t\t// SYSCOLUMNS and immediately re-added with a different",
"\t\t\t// column position (to account for the dropped column).",
"\t\t\t// In this case, the autoincValue may have a",
"\t\t\t// different value than the autoincStart.",
"\t\t\t\t\t\t new SQLLongint(autoincValue));"
],
"header": "@@ -222,8 +224,15 @@ public class SYSCOLUMNSRowFactory extends CatalogRowFactory",
"removed": [
"\t\t\t\t\t\t new SQLLongint(autoincStart));"
]
},
{
"added": [
"\t\tlong autoincStart, autoincInc, autoincValue;"
],
"header": "@@ -320,7 +329,7 @@ public class SYSCOLUMNSRowFactory extends CatalogRowFactory",
"removed": [
"\t\tlong autoincStart, autoincInc;"
]
},
{
"added": [
"\t\t/* 7th column is AUTOINCREMENTVALUE (long) */",
"\t\tautoincValue = row.getColumn(SYSCOLUMNS_AUTOINCREMENTVALUE).getLong();"
],
"header": "@@ -393,7 +402,8 @@ public class SYSCOLUMNSRowFactory extends CatalogRowFactory",
"removed": [
"\t\t/* 7th column is AUTOINCREMENTVALUE, not cached in descriptor (long) */"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java",
"hunks": [
{
"added": [
"import org.apache.derby.impl.sql.compile.ColumnDefinitionNode;"
],
"header": "@@ -79,6 +79,7 @@ import org.apache.derby.iapi.types.DataValueDescriptor;",
"removed": []
}
]
}
] |
derby-DERBY-3176-05e9c291
|
DERBY-3176: Re-enable support for running Derby on phoneME small device platforms.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@594184 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/info/JVMInfo.java",
"hunks": [
{
"added": [
"\tThis class is used to determine which Java specification Derby will run at.",
" For a useful discussion of how this class is used, please see DERBY-3176."
],
"header": "@@ -23,7 +23,8 @@ package org.apache.derby.iapi.services.info;",
"removed": [
"\tWhat's the current JDK runtime environment."
]
},
{
"added": [
"\t\t<LI> 2 - not used, was for JDK 1.2 and 1.3",
"\t\t<LI> 7 - J2SE_16 - JDK 1.6"
],
"header": "@@ -31,15 +32,15 @@ public abstract class JVMInfo",
"removed": [
"\t\t<LI> 2 - J2SE_13- JDK 1.2, 1.3",
"\tpublic static final int J2SE_13 = 2;"
]
},
{
"added": [
"\t\t\tjavaVersion = System.getProperty(\"java.specification.version\", \"1.4\");",
"\t\t\tjavaVersion = \"1.4\";",
"\t\tif (javaSpec != null &&",
" (",
" javaSpec.startsWith(\"J2ME\") || // recognize IBM WCTME",
" (",
" (javaSpec.indexOf( \"Profile\" ) > -1) && // recognize phoneME",
" (javaSpec.indexOf( \"Specification\" ) > -1)",
" )",
" )",
" )",
"\t\t\tid = J2SE_14;"
],
"header": "@@ -80,23 +81,25 @@ public abstract class JVMInfo",
"removed": [
"\t\t\tjavaVersion = System.getProperty(\"java.specification.version\", \"1.3\");",
"\t\t\tjavaVersion = \"1.3\";",
"\t\tif (javaSpec != null && javaSpec.startsWith(\"J2ME\"))",
"\t\t\t// IBM's WCTME 5.7 returns these values for CDC 1.0 profiles.",
"\t\t\t// \"J2ME Foundation Specification\"",
"\t\t\t//",
"",
"\t\t\t// Foundation 1.0 and Personal Profile 1.0 based",
"\t\t\t// upon CDC 1.0 which is JDK 1.3 based",
"\t\t\tid = J2SE_13;"
]
},
{
"added": [
"\t\t\tif (javaVersion.equals(\"1.4\"))"
],
"header": "@@ -104,11 +107,7 @@ public abstract class JVMInfo",
"removed": [
"\t\t\tif (javaVersion.equals(\"1.2\") || javaVersion.equals(\"1.3\"))",
"\t\t\t{\t",
"\t\t\t\tid = J2SE_13; //jdk1.3 is still Java2 platform with the same API",
"\t\t\t}",
"\t\t\telse if (javaVersion.equals(\"1.4\"))"
]
},
{
"added": [
"\t\t\t\tid = J2SE_14;"
],
"header": "@@ -129,7 +128,7 @@ public abstract class JVMInfo",
"removed": [
"\t\t\t\tid = J2SE_13;"
]
}
]
}
] |
derby-DERBY-3176-8a319fbe
|
DERBY-3224 - modifications to enable running with phoneME advanced platform;
changes are as follows:
- org/apache/derby/iapi/tools/i18n/LocalizedInput.java -
Performance of the ij tests improved by using a BufferedReader on top of the
InputStreamReader when ij reads from its script files.
The reason is that character conversion uses internal buffers.
Every time the buffer is full, an exception object would be created, thrown and caught; for instance, during the LangScripts test, when ij read one character at a time from the InputStream, this happened 1.5 million times. Using the BufferedReader reduced the number of exceptions to 92, and made the test run a lot faster on this platform.
- GrantRevokeTest - cleanup code was added (on phoneME advanced, tests run in opposite order)
- CollationTest.java and CollationTest2.java - Added code to not run the tests for non-English locales if the locales are not supported
- ForBitDataTest.java - cleanup code was added
- ScrollCursors1Test.java - cleanup code was added
- ForUpdateTest.java - Moved code from a test to general setup as it was needed by more than one test and thus fixtures were not independent of order.
- UpdateCursorTest.java - cleanup code was added
- upgradeTests/_Suite.java - added code to prevent upgrade from 10.3.1.4
because 10.3.1.4 doesn't boot on this platform (DERBY-3176).
- BaseJDBCTestCase - added method to compare Time objects
patch contributed by Vermund Ostgaard
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618507 13f79535-47bb-0310-9956-ffa450edef68
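The LocalizedInput change is the standard java.io buffering idiom; a minimal before/after sketch (the file name is an assumption):

    import java.io.BufferedReader;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.io.Reader;

    public class BufferedReadSketch {
        public static void main(String[] args) throws IOException {
            // Before: every read() hits the character converter directly.
            //   Reader r = new InputStreamReader(new FileInputStream("script.sql"));
            // After: the BufferedReader batches the underlying reads.
            Reader r = new BufferedReader(
                    new InputStreamReader(new FileInputStream("script.sql")));
            int ch;
            while ((ch = r.read()) != -1) {
                // ij still consumes one character at a time, but each read()
                // is now served from an in-memory buffer.
            }
            r.close();
        }
    }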
|
[
{
"file": "java/tools/org/apache/derby/iapi/tools/i18n/LocalizedInput.java",
"hunks": [
{
"added": [
"import java.io.BufferedReader;",
"public class LocalizedInput extends BufferedReader{",
"\t\tsuper(new InputStreamReader(i));",
"\t\tsuper(new InputStreamReader(i,encode));"
],
"header": "@@ -20,20 +20,21 @@",
"removed": [
"public class LocalizedInput extends InputStreamReader{",
"\t\tsuper(i);",
"\t\tsuper(i,encode);"
]
}
]
}
] |
derby-DERBY-3177-57465efa
|
DERBY-3177: Cleanup suspect coding practices in the types package.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1468143 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/catalog/types/DefaultInfoImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.io.ArrayUtil;"
],
"header": "@@ -21,6 +21,7 @@",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/catalog/types/IndexDescriptorImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.io.ArrayUtil;"
],
"header": "@@ -23,6 +23,7 @@ package org.apache.derby.catalog.types;",
"removed": []
},
{
"added": [
"\t\tthis.baseColumnPositions = ArrayUtil.copy( baseColumnPositions );",
"\t\tthis.isAscending = ArrayUtil.copy( isAscending );"
],
"header": "@@ -98,8 +99,8 @@ public class IndexDescriptorImpl implements IndexDescriptor, Formatable",
"removed": [
"\t\tthis.baseColumnPositions = baseColumnPositions;",
"\t\tthis.isAscending = isAscending;"
]
},
{
"added": [
" return ArrayUtil.copy( baseColumnPositions );"
],
"header": "@@ -127,7 +128,7 @@ public class IndexDescriptorImpl implements IndexDescriptor, Formatable",
"removed": [
"\t\treturn baseColumnPositions;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/catalog/types/ReferencedColumnsDescriptorImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.io.ArrayUtil;"
],
"header": "@@ -22,6 +22,7 @@",
"removed": []
},
{
"added": [
"\t\tthis.referencedColumns = ArrayUtil.copy( referencedColumns );"
],
"header": "@@ -80,7 +81,7 @@ public class ReferencedColumnsDescriptorImpl",
"removed": [
"\t\tthis.referencedColumns = referencedColumns;"
]
},
{
"added": [
"\t\tthis.referencedColumns = ArrayUtil.copy( referencedColumns );",
"\t\tthis.referencedColumnsInTriggerAction = ArrayUtil.copy( referencedColumnsInTriggerAction );"
],
"header": "@@ -94,8 +95,8 @@ public class ReferencedColumnsDescriptorImpl",
"removed": [
"\t\tthis.referencedColumns = referencedColumns;",
"\t\tthis.referencedColumnsInTriggerAction = referencedColumnsInTriggerAction;"
]
},
{
"added": [
"\t\treturn ArrayUtil.copy( referencedColumns );"
],
"header": "@@ -107,7 +108,7 @@ public class ReferencedColumnsDescriptorImpl",
"removed": [
"\t\treturn referencedColumns;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/catalog/types/RoutineAliasInfo.java",
"hunks": [
{
"added": [
"\t\tthis.parameterNames = ArrayUtil.copy( parameterNames );",
"\t\tsetParameterTypes( parameterTypes );",
"\t\tthis.parameterModes = ArrayUtil.copy( parameterModes );"
],
"header": "@@ -172,9 +172,9 @@ public class RoutineAliasInfo extends MethodAliasInfo",
"removed": [
"\t\tthis.parameterNames = parameterNames;",
"\t\tthis.parameterTypes = parameterTypes;",
"\t\tthis.parameterModes = parameterModes;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/catalog/types/RowMultiSetImpl.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.services.io.ArrayUtil;"
],
"header": "@@ -22,6 +22,7 @@",
"removed": []
},
{
"added": [
" _columnNames = ArrayUtil.copy( columnNames );",
" setTypes( types );"
],
"header": "@@ -86,8 +87,8 @@ public class RowMultiSetImpl extends BaseTypeIdImpl",
"removed": [
" _columnNames = columnNames;",
" _types = types;"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/QueryTreeNode.java",
"hunks": [
{
"added": [],
"header": "@@ -1678,7 +1678,6 @@ public abstract class QueryTreeNode implements Node, Visitable",
"removed": [
" String[] columnNames = originalMultiSet.getColumnNames();"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java",
"hunks": [
{
"added": [
"\t\t\t\tboolean rewriteBaseColumnPositions = false;",
" {",
" rewriteBaseColumnPositions = true;",
" }",
"",
" if ( rewriteBaseColumnPositions )",
" {",
" compressIRGs[j].setBaseColumnPositions( baseColumnPositions );",
" }",
" "
],
"header": "@@ -2766,17 +2766,27 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction",
"removed": []
}
]
}
] |
derby-DERBY-3178-2216a400
|
DERBY-3178
This commit modifies DataSourceTest.java to ensure that expected Connection events are fired to the
ConnectionEventListener.
The changes have been implemented by having AssertEventCatcher in DataSourceTest.java keep track of what events
it received. This state is kept in boolean gotConnectionClosed and boolean gotConnectionErrorOccured flags in
AssertEventCatcher. The test then checks the state of AssertEventCatcher after it has finished firing the events
to make sure AssertEventCatcher got the expected events.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@592544 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3181-38e6a8d2
|
DERBY-3181: DatabaseMetaData.getBestRowIdentifier behavior with invalid scope
This patch was contributed by Danoja Dias (danojadias at gmail dot com)
The DatabaseMetaData.getBestRowIdentifier method takes a number of arguments,
including "scope", which is supposed to be one of the enumerated values:
bestRowTemporary, bestRowTransaction, and bestRowSession.
If an invalid scope argument was passed to this method, Derby was returning
a hard-coded "empty" row identifier, which was slightly different, in detail,
to the row identifier that is returned for a valid scope argument.
Since JDBC does not require that we return such a row identifier for an
invalid scope argument, it seems cleaner and more useful to throw an
exception with a message indicating that an invalid scope argument was
passed. The caller can then correct their application to make the call with
a valid scope argument, and will then receive a valid row identifier.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1745414 13f79535-47bb-0310-9956-ffa450edef68
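For reference, a well-formed call passes one of the three scope constants; a minimal sketch (the schema and table names are invented, and con is assumed to be an open connection):

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class BestRowSketch {
        static void printBestRow(Connection con) throws SQLException {
            DatabaseMetaData dmd = con.getMetaData();
            ResultSet rs = dmd.getBestRowIdentifier(
                    null, "APP", "MYTABLE",            // catalog, schema, table
                    DatabaseMetaData.bestRowTemporary, // scope: one of the three constants
                    false);                            // nullable columns not acceptable
            while (rs.next()) {
                System.out.println(rs.getString("COLUMN_NAME"));
            }
            rs.close();
            // Passing a scope outside bestRowTemporary/bestRowTransaction/
            // bestRowSession now raises an exception instead of returning a
            // hard-coded empty row identifier.
        }
    }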
|
[] |
derby-DERBY-3182-3d3a8fe7
|
DERBY-3182; test comments regarding the functioning of the nullisok flag (5th parameter to
getBestRowIdentifier) were incorrect. Adjusting test.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@593931 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-319-fd58c3c2
|
Derby-194: Modify Derby metadata for datetime values so that the correct "precision" and "scale" for those types will be returned in metadata calls.
Description from Army:
Since "precision" and "scale" aren't clearly defined for datetime values in JDBC, I've set them based on the ODBC specification. It was agreed in discussion of this issue (and also of DERBY-319) that the "intent" of JDBC for these values is to mimic ODBC behavior. See the thread here for that discussion:
http://article.gmane.org/gmane.comp.apache.db.derby.devel/2786
http://article.gmane.org/gmane.comp.apache.db.derby.devel/2787
So that said, the attached patch sets precision/scale for datetime values according to the following ODBC pages:
[ Precision ]
http://msdn.microsoft.com/library/default.asp?url=/library/en-us/odbc/htm/odbccolumn_size.asp
Pasted from the above link:
"The column (or parameter) size of numeric data types is defined as the maximum number of digits used by the data type of the column or parameter, or the precision of the data. For character types, this is the length in characters of the data; for binary data types, column size is defined as the length in bytes of the data. For the time, timestamp, and all interval data types, this is the number of characters in the character representation of this data."
[ Scale ]
http://msdn.microsoft.com/library/default.asp?url=/library/en-us/odbc/htm/odbcdecimal_digits.asp
Pasted from the above link:
"The decimal digits of decimal and numeric data types is defined as the maximum number of digits to the right of the decimal point, or the scale of the data. For approximate floating-point number columns or parameters, the scale is undefined because the number of digits to the right of the decimal point is not fixed. For datetime or interval data that contains a seconds component, the decimal digits is defined as the number of digits to the right of the decimal point in the seconds component of the data."
I have run "derbyall" on Windows 2000 with Sun JDK 1.4.2 and have included all required master updates in the patch. The "svn stat" output is attached to this email, along with the patch itself.
Submitted by Army Brown (qozinx@sbcglobal.net)
git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@179839 13f79535-47bb-0310-9956-ffa450edef68
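The effect is observable through ResultSetMetaData; a minimal sketch (the table and column are invented):

    import java.sql.Connection;
    import java.sql.ResultSetMetaData;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class DatetimePrecisionSketch {
        static void show(Connection con) throws SQLException {
            Statement s = con.createStatement();
            ResultSetMetaData md =
                    s.executeQuery("SELECT ts FROM t").getMetaData();
            // With this change a TIMESTAMP column reports precision 26
            // ("yyyy-mm-dd hh:mm:ss.ffffff") and scale 6; TIME reports 8 and 0;
            // DATE reports precision 10 (dates carry no scale).
            System.out.println(md.getPrecision(1) + " / " + md.getScale(1));
            s.close();
        }
    }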
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/TypeId.java",
"hunks": [
{
"added": [
" // Max width for datetime values is the length of the",
" // string returned from a call to \"toString()\" on the",
" // java.sql.Date, java.sql.Time, and java.sql.Timestamp",
" // classes (the result of toString() on those classes",
" // is defined by the JDBC API). This value is also",
" // used as the \"precision\" for those types.",
" public static final int DATE_MAXWIDTH = 10;\t// yyyy-mm-dd",
" public static final int TIME_MAXWIDTH = 8;\t// hh:mm:ss",
" public static final int TIMESTAMP_MAXWIDTH = 26;\t// yyyy-mm-dd hh:mm:ss.ffffff",
"",
" // Scale DOES exist for time values. For a TIMESTAMP value,",
" // it's 6 ('ffffff'); for a TIME value, it's 0 (because there",
" // are no fractional seconds). Note that date values do",
" // not have a scale.",
" public static final int TIME_SCALE = 0;",
" public static final int TIMESTAMP_SCALE = 6;"
],
"header": "@@ -115,9 +115,22 @@ public final class TypeId implements Formatable",
"removed": [
" public static final int DATE_MAXWIDTH = 4;",
" public static final int TIME_MAXWIDTH = 8;",
" public static final int TIMESTAMP_MAXWIDTH = 12;"
]
},
{
"added": [
" maxMaxWidth = TypeId.DATE_MAXWIDTH;",
" maxPrecision = TypeId.DATE_MAXWIDTH;"
],
"header": "@@ -714,10 +727,8 @@ public final class TypeId implements Formatable",
"removed": [
" /* this is used in ResultSetMetaData.getPrecision",
" * undefined for datetime types",
" */",
" maxMaxWidth = -1;"
]
}
]
}
] |
derby-DERBY-3191-aaa14032
|
DERBY-3191 SQL roles: add upgrade support
Patch DERBY-3191b; adds hard upgrade code for the new roles system table SYS.SYSROLES,
plus tests for this functionality to the upgrade tests.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@597409 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java",
"hunks": [
{
"added": [
" /**",
" * Similar to additionalDatabaseDecorator except the database will",
" * not be shutdown, only deleted. It is the responsibility of the",
" * test to shut it down.",
" *",
" * @param test Test to be decorated",
" * @param logicalDbName The logical database name. This name is",
" * used to identify the database in",
" * openConnection(String logicalDatabaseName)",
" * method calls.",
" * @return decorated test.",
" */",
" public static TestSetup additionalDatabaseDecoratorNoShutdown(",
" Test test,",
" String logicalDbName)",
" {",
" return new DatabaseChangeSetup(",
" new DropDatabaseSetup(test, logicalDbName)",
" {",
" protected void tearDown() throws Exception {",
" // the test is responsible for shutdown",
" removeDatabase();",
" }",
" },",
" logicalDbName,",
" generateUniqueDatabaseName(),",
" false);",
" }",
""
],
"header": "@@ -434,6 +434,35 @@ public class TestConfiguration {",
"removed": []
}
]
}
] |
derby-DERBY-3192-585cf5c9
|
DERBY-3192: Cache session data in the client driver
Remove special handling of SYNCCTL in sanity-check ASSERT. Since the final
version of the real patch piggy-backs changes caused by SYNCCTL there is no
longer any need to omit the sanity check after SYNCCTL.
Patch file: derby-3192-fup.v1.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@633011 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
" SanityManager.ASSERT(!pbsd.isModified(),",
" \"Unexpected PBSD modification: \" + pbsd +",
" \" after codePoint \" + cpStr);",
" server.consoleExceptionPrint(sqle);"
],
"header": "@@ -1032,20 +1032,16 @@ class DRDAConnThread extends Thread {",
"removed": [
" if (codePoint != CodePoint.SYNCCTL) {",
" // We expect the session attributes to have changed",
" // after SYNCCTL, but this is handled by the client",
" // and is not a problem",
" SanityManager.ASSERT(!pbsd.isModified(),",
" \"Unexpected PBSD modification: \" + pbsd +",
" \" after codePoint \" + cpStr);",
" }"
]
}
]
}
] |
derby-DERBY-3192-c6c4d4de
|
DERBY-3192: Cache session data in the client driver
Piggy-backs the current isolation level and the current schema onto
messages going back to the client. The client caches this information so that
it can be returned to a user (app) without an extra round-trip.
See also http://wiki.apache.org/db-derby/Derby3192Writeup
Patch file: derby-3192-mark2.v8.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@631593 13f79535-47bb-0310-9956-ffa450edef68
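The client-side cache follows a simple sentinel pattern; a schematic model (invented names, not the driver's Connection class):

    import java.util.function.IntSupplier;

    // Schematic of the cached-isolation handling: piggy-backed updates fill
    // the cache, while resets and XA state changes invalidate it.
    class SessionDataCacheSketch {
        static final int TRANSACTION_UNKNOWN = -1;
        private int isolation = TRANSACTION_UNKNOWN;

        void onPiggyBackedIsolation(int level) {
            isolation = level;                  // server told us the new level
        }

        void invalidate() {
            isolation = TRANSACTION_UNKNOWN;    // e.g. connection reset, XA change
        }

        int getIsolation(IntSupplier queryServer) {
            if (isolation == TRANSACTION_UNKNOWN) {
                isolation = queryServer.getAsInt();  // one round-trip, then cached
            }
            return isolation;                        // no round-trip needed
        }
    }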
|
[
{
"file": "java/client/org/apache/derby/client/am/Connection.java",
"hunks": [
{
"added": [
"import org.apache.derby.shared.common.sanity.SanityManager;"
],
"header": "@@ -27,6 +27,7 @@ import org.apache.derby.shared.common.reference.JDBC30Translation;",
"removed": []
},
{
"added": [
" /**",
" * Constant indicating that isolation_ has not been updated through",
" * piggy-backing, (or that the previously stored value was invalidated,",
" * e.g. by an XA state change).",
" */",
" private static final int TRANSACTION_UNKNOWN = -1;",
" /**",
" * Cached copy of the isolation level. Kept in sync with server through",
" * piggy-backing.",
" */",
" private int isolation_ = TRANSACTION_UNKNOWN;",
"",
" /**",
" * Cached copy of the schema name. Updated through piggy-backing and used",
" * to implement statement caching.",
" */",
" private String currentSchemaName_ = null;",
""
],
"header": "@@ -93,7 +94,24 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" public int isolation_ = Configuration.defaultIsolation;"
]
},
{
"added": [
" isolation_ = TRANSACTION_UNKNOWN;",
" currentSchemaName_ = null;"
],
"header": "@@ -274,7 +292,8 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" isolation_ = Configuration.defaultIsolation;"
]
},
{
"added": [
" isolation_ = TRANSACTION_UNKNOWN;",
" currentSchemaName_ = null;"
],
"header": "@@ -311,7 +330,8 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" isolation_ = Configuration.defaultIsolation;"
]
},
{
"added": [
" if (SanityManager.DEBUG && supportsSessionDataCaching()) {",
" SanityManager.ASSERT(isolation_ == level);",
" }"
],
"header": "@@ -975,7 +995,9 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" isolation_ = level;"
]
},
{
"added": [
" /**",
" * Finds out if the underlaying database connection supports session data",
" * caching.",
" * @return true if sessionData is supported",
" */",
" protected abstract boolean supportsSessionDataCaching();",
""
],
"header": "@@ -983,6 +1005,13 @@ public abstract class Connection implements java.sql.Connection,",
"removed": []
},
{
"added": [
" if (isolation_ != TRANSACTION_UNKNOWN) {",
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(supportsSessionDataCaching(),",
" \"Cannot return cached isolation when caching is \" +",
" \"not supported!\");",
" }",
" return isolation_;",
" }",
""
],
"header": "@@ -997,6 +1026,15 @@ public abstract class Connection implements java.sql.Connection,",
"removed": []
},
{
"added": [
" // DERBY-3192 - Cache session data in the client driver allows",
" // the re-introduction of isolation level caching. Changes to the",
" // isolation level triggered from SQL are now handled by",
" // piggybacking the modified isolation level on messages going",
" // back to the client.",
" // The XA-problem is handled by letting XA state changes set the",
" // cached isolation level to TRANSACTION_UNKNOWN which will trigger",
" // a refresh from the server."
],
"header": "@@ -1007,6 +1045,14 @@ public abstract class Connection implements java.sql.Connection,",
"removed": []
},
{
"added": [
"",
" int isolation = translateIsolation(isolationStr);",
" if (isolation_ == TRANSACTION_UNKNOWN &&",
" supportsSessionDataCaching()) {",
" // isolation_ will be TRANSACTION_UNKNOWN if the connection has",
" // been reset on",
" // the client. The server will not observe a",
" // change in isolation level so no information is",
" // piggy-backed. Update the cached value here, rather than",
" // waiting for the isolation to change on the server.",
" isolation_ = isolation;",
" }",
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(!supportsSessionDataCaching() ||",
" (isolation_ == isolation),",
" \"Cached isolation_ not updated, (isolation_=\"+",
" isolation_+\")!=(isolation=\"+isolation+\")\");",
" SanityManager.ASSERT(supportsSessionDataCaching() ||",
" (isolation_ == TRANSACTION_UNKNOWN),",
" \"isolation_ modified when caching is not supported\");",
" }",
"",
" return isolation;"
],
"header": "@@ -1020,13 +1066,35 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" isolation_ = translateIsolation(isolationStr);"
]
},
{
"added": [
" /**",
" * Returns the current schema (the schema that would be used for",
" * compilation. This is not part of the java.sql.Connection interface, and",
" * is only intended for use with statement caching.",
" * @return the name of the current schema",
" * @throws java.sql.SQLException",
" */",
" public String getCurrentSchemaName() throws SQLException {",
" try {",
" checkForClosedConnection();",
" } catch (SqlException se) {",
" throw se.getSQLException();",
" }",
" if (currentSchemaName_ == null) {",
" if (agent_.loggingEnabled()) {",
" agent_.logWriter_.traceEntry(this,",
" \"getCurrentSchemaName() executes query\");",
" }",
" java.sql.Statement s = createStatement();",
" java.sql.ResultSet rs = s.executeQuery(\"VALUES CURRENT SCHEMA\");",
" rs.next();",
" String schema = rs.getString(1);",
" rs.close();",
" s.close();",
" return schema;",
" }",
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(supportsSessionDataCaching(),",
" \"A cached schema name (\"+currentSchemaName_+",
" \") is not expected when session data caching is not\" +",
" \"supported\");",
" }",
" return currentSchemaName_;",
" }",
""
],
"header": "@@ -1038,10 +1106,43 @@ public abstract class Connection implements java.sql.Connection,",
"removed": [
" ",
" return isolation_;"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetConnectionReply.java",
"hunks": [
{
"added": [
" protected int parseSYNCCTLreply(ConnectionCallbackInterface connection) ",
" throws DisconnectException {",
" if (peekCodePoint() == CodePoint.PBSD) {",
" parsePBSD();",
" }"
],
"header": "@@ -2130,7 +2130,11 @@ public class NetConnectionReply extends Reply",
"removed": [
" protected int parseSYNCCTLreply(ConnectionCallbackInterface connection) throws DisconnectException {"
]
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetDatabaseMetaData.java",
"hunks": [
{
"added": [
" /**",
" * True if the server supports session data caching",
" */",
" private boolean supportsSessionDataCaching_;",
""
],
"header": "@@ -31,6 +31,11 @@ public class NetDatabaseMetaData extends org.apache.derby.client.am.DatabaseMeta",
"removed": []
},
{
"added": [
"",
" supportsSessionDataCaching_ =",
" productLevel_.greaterThanOrEqualTo(10, 4, 0);"
],
"header": "@@ -86,6 +91,9 @@ public class NetDatabaseMetaData extends org.apache.derby.client.am.DatabaseMeta",
"removed": []
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetResultSetReply.java",
"hunks": [
{
"added": [
" if (peekCP == CodePoint.PBSD) {",
" parsePBSD();",
" }"
],
"header": "@@ -126,6 +126,9 @@ public class NetResultSetReply extends NetStatementReply implements ResultSetRep",
"removed": []
}
]
},
{
"file": "java/client/org/apache/derby/client/net/NetStatementReply.java",
"hunks": [
{
"added": [
" peekCP = peekCodePoint();",
" if (peekCP == CodePoint.PBSD) {",
" parsePBSD();",
" }",
" /**",
" * Parse the reply for the Open Query Command. This method handles the",
" * parsing of all command replies and reply data for the opnqry command.",
" * will be replaced by parseOPNQRYreply (see parseOPNQRYreplyProto)",
" * @param statementI statement to invoke callbacks on",
" * @throws org.apache.derby.client.am.DisconnectException",
" */",
" private void parseOPNQRYreply(StatementCallbackInterface statementI)",
" throws DisconnectException {"
],
"header": "@@ -211,12 +211,21 @@ public class NetStatementReply extends NetPackageReply implements StatementReply",
"removed": [
" // Parse the reply for the Open Query Command.",
" // This method handles the parsing of all command replies and reply data for the opnqry command.",
" // will be replaced by parseOPNQRYreply (see parseOPNQRYreplyProto)",
" private void parseOPNQRYreply(StatementCallbackInterface statementI) throws DisconnectException {"
]
},
{
"added": [
" if (peekCP == CodePoint.PBSD) {",
" parsePBSD();",
" }"
],
"header": "@@ -238,6 +247,9 @@ public class NetStatementReply extends NetPackageReply implements StatementReply",
"removed": []
},
{
"added": [
" peekCP = peekCodePoint();",
" }",
"",
" if (peekCP == CodePoint.PBSD) {",
" parsePBSD();"
],
"header": "@@ -288,6 +300,11 @@ public class NetStatementReply extends NetPackageReply implements StatementReply",
"removed": []
},
{
"added": [
" peekCP = peekCodePoint();"
],
"header": "@@ -302,6 +319,7 @@ public class NetStatementReply extends NetPackageReply implements StatementReply",
"removed": []
}
]
},
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
""
],
"header": "@@ -17,11 +17,7 @@",
"removed": [
"/**",
" * This class translates DRDA protocol from an application requester to JDBC",
" * for Derby and then translates the results from Derby to DRDA",
" * for return to the application requester.",
" */"
]
},
{
"added": [],
"header": "@@ -34,7 +30,6 @@ import java.io.UnsupportedEncodingException;",
"removed": [
"import java.sql.Driver;"
]
},
{
"added": [
"/**",
" * This class translates DRDA protocol from an application requester to JDBC",
" * for Derby and then translates the results from Derby to DRDA",
" * for return to the application requester.",
" */"
],
"header": "@@ -70,6 +65,11 @@ import org.apache.derby.impl.jdbc.Util;",
"removed": []
},
{
"added": [
" writePBSD();"
],
"header": "@@ -724,6 +724,7 @@ class DRDAConnThread extends Thread {",
"removed": []
},
{
"added": [
" writePBSD();"
],
"header": "@@ -754,6 +755,7 @@ class DRDAConnThread extends Thread {",
"removed": []
},
{
"added": [
" writePBSD();"
],
"header": "@@ -851,6 +853,7 @@ class DRDAConnThread extends Thread {",
"removed": []
},
{
"added": [
" writePBSD();"
],
"header": "@@ -991,6 +994,7 @@ class DRDAConnThread extends Thread {",
"removed": []
},
{
"added": [
" \t\t\t\t\ttry {",
" \t\t\t\t\t\twritePBSD();",
" \t\t\t\t\t} catch (SQLException se) {",
"\t\t\t\t\t\tserver.consoleExceptionPrint(se);",
" \t\t\t\t\t\terrorInChain(se);",
" \t\t\t\t\t}",
" if (SanityManager.DEBUG) {",
" String cpStr = new CodePointNameTable().lookup(codePoint);",
" try {",
" PiggyBackedSessionData pbsd =",
" database.getPiggyBackedSessionData(false);",
" if (pbsd != null) {",
" // Session data has already been piggy-backed. Refresh",
" // the data from the connection, to make sure it has",
" // not changed behind our back.",
" pbsd.refresh();",
" if (codePoint != CodePoint.SYNCCTL) {",
" // We expect the session attributes to have changed",
" // after SYNCCTL, but this is handled by the client",
" // and is not a problem",
" SanityManager.ASSERT(!pbsd.isModified(),",
" \"Unexpected PBSD modification: \" + pbsd +",
" \" after codePoint \" + cpStr);",
" }",
" }",
" // Not having a pbsd here is ok. No data has been",
" // piggy-backed and the client has no cached values.",
" // If needed it will send an explicit request to get",
" // session data",
" } catch (SQLException sqle) {",
" SanityManager.THROWASSERT(\"Unexpected exception after \" +",
" \"codePoint \"+cpStr, sqle);",
" }",
" }",
""
],
"header": "@@ -1007,11 +1011,46 @@ class DRDAConnThread extends Thread {",
"removed": []
}
]
},
{
"file": "java/drda/org/apache/derby/impl/drda/Database.java",
"hunks": [
{
"added": [
" if (this.conn != conn) {",
" // Need to drop the pb session data when switching connections",
" pbsd_ = null;",
" }"
],
"header": "@@ -122,6 +122,10 @@ class Database",
"removed": []
}
]
}
] |
derby-DERBY-3192-d4a1ecd2
|
DERBY-3192: followup to disable the test for JSR169 and use more harness helper methods.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@597630 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3195-10acff8d
|
DERBY-3195; Describe whether the default security manager & policy is installed or not for each of the mechanisms to start the Network Server.
updating the javadoc for NetworkServerControl to reflect information about the security manager.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1646373 13f79535-47bb-0310-9956-ffa450edef68
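For context, the documented entry point looks like this in use (host, port and the immediate ping are illustrative; real code should retry the ping until the server is up):

    import java.net.InetAddress;
    import org.apache.derby.drda.NetworkServerControl;

    public class StartServerSketch {
        public static void main(String[] args) throws Exception {
            NetworkServerControl server =
                    new NetworkServerControl(InetAddress.getByName("localhost"), 1527);
            // Per the updated javadoc, start() launches the server in a separate
            // thread and installs a security manager with the default policy.
            server.start(null);     // null: messages go to the default print stream
            server.ping();          // throws if the server is not reachable
            server.shutdown();
        }
    }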
|
[
{
"file": "java/drda/org/apache/derby/drda/NetworkServerControl.java",
"hunks": [
{
"added": [
" connect to a running Network Server to shutdown, configure or retrieve "
],
"header": "@@ -37,7 +37,7 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": [
" connect to a running Network Server to shutdown, configure or retreive "
]
},
{
"added": [
" <LI>start [-h <host>] [-p <portnumber>] [-ssl <sslmode>]: This starts the Network",
" Server on the port/host specified or on localhost, port 1527 if no",
" defaults. By default a security manager with a default security policy will ",
" be installed. The default security policy file is called server.policy. ",
" By default the Network Server will only listen for ",
" <LI>shutdown [-h <host>][-p <portnumber>] [-ssl <sslmode>] [-user <username>] [-password <password>]: This shutdowns the Network Server with given user credentials on the host and port specified or on the local host and port 1527(default) if no host or port is specified. </LI> "
],
"header": "@@ -45,16 +45,18 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": [
" <LI>start [-h <host>] [-p <portnumber>] [-ssl <sslmode>]: This starts the network",
" server on the port/host specified or on localhost, port 1527 if no",
" defaults. By default Network Server will only listen for ",
" <LI>shutdown [-h <host>][-p <portnumber>] [-ssl <sslmode>] [-user <username>] [-password <password>]: This shutdowns the network server with given user credentials on the host and port specified or on the local host and port 1527(default) if no host or port is specified. </LI> "
]
},
{
"added": [
" <P>This is an example of starting and then shutting down the Network ",
" Server on port 1621 on machine myhost "
],
"header": "@@ -159,8 +161,8 @@ import org.apache.derby.impl.drda.NetworkServerControlImpl;",
"removed": [
" <P>This is an example of starting and then shutting down the network ",
" server on port 1621 on machine myhost "
]
},
{
"added": [
" * This method will launch a separate thread and start a Network Server.",
" * This will also install a security manager with a default security policy."
],
"header": "@@ -380,8 +382,9 @@ public class NetworkServerControl{",
"removed": [
" * This method will launch a separate thread and start Network Server."
]
},
{
"added": [
" * Check if the Network Server is started"
],
"header": "@@ -419,7 +422,7 @@ public class NetworkServerControl{",
"removed": [
" * Check if Network Server is started"
]
}
]
}
] |
derby-DERBY-3198-29c10e34
|
DERBY-3198: Using setQueryTimeout will leak sections
Patch file: derby-3198.v6.diff
Introduce a separate varable to track the Section object used by
writeSetSpecialRegister. The Section is allocated on demand and freed
when the Statement is closed or reset.
Adds a new test case to StatementJDBC30Test to catch future regressions.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@602495 13f79535-47bb-0310-9956-ffa450edef68
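The fix follows an allocate-on-demand, free-on-close pattern; in schematic form (placeholder types and helpers, not the client's Statement class):

    // Schematic of the Section lifecycle introduced by the fix.
    class SectionLifecycleSketch {
        private Object setSpecialRegisterSection;   // stand-in for a Section

        void writeSetSpecialRegister() {
            if (setSpecialRegisterSection == null) {
                // Allocate once and reuse, instead of allocating a new
                // Section on every call and never returning it.
                setSpecialRegisterSection = allocateSection();
            }
            // ... write the SET statement using the section ...
        }

        void closeOrReset() {
            if (setSpecialRegisterSection != null) {
                freeSection(setSpecialRegisterSection);  // return it to the pool
                setSpecialRegisterSection = null;
            }
        }

        private Object allocateSection() { return new Object(); }  // placeholder
        private void freeSection(Object section) { }               // placeholder
    }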
|
[
{
"file": "java/client/org/apache/derby/client/am/Statement.java",
"hunks": [
{
"added": [
"import org.apache.derby.shared.common.sanity.SanityManager;"
],
"header": "@@ -24,6 +24,7 @@ import java.sql.SQLException;",
"removed": []
},
{
"added": [
" if (setSpecialRegisterSection_ != null) {",
" setSpecialRegisterSection_.free();",
" setSpecialRegisterSection_ = null;",
" }",
""
],
"header": "@@ -225,6 +226,11 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface",
"removed": []
},
{
"added": [
" /**",
" * This variable keeps track of a Section dediacted to",
" * writeSpecialRegister. It gets initialized the first time a",
" * Section is needed, and freed when the Statement is closed.",
" */",
" private Section setSpecialRegisterSection_ = null;",
" public void writeSetSpecialRegister(java.util.ArrayList sqlsttList) ",
" throws SqlException {",
" if (setSpecialRegisterSection_ == null) {",
" setSpecialRegisterSection_ = ",
" agent_.sectionManager_.getDynamicSection",
" (java.sql.ResultSet.HOLD_CURSORS_OVER_COMMIT);",
" }",
" materialStatement_.writeSetSpecialRegister_(setSpecialRegisterSection_,",
" sqlsttList);",
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(setSpecialRegisterSection_ != null);",
" }"
],
"header": "@@ -1296,11 +1302,27 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface",
"removed": [
" public void writeSetSpecialRegister(java.util.ArrayList sqlsttList) throws SqlException {",
" materialStatement_.writeSetSpecialRegister_(sqlsttList);"
]
}
]
}
] |
derby-DERBY-3199-86bc9d55
|
DERBY-3199: Minor cleanup of DRDAProtocolTest.
Patch file: derby-3199-1a-minor_cleanup.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@594118 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3202-57964be4
|
DERBY-3202: 'AccessControlException: access denied
(java.io.FilePermission C:\....\file-2.log read)' in ErrorStreamTest.
The problem was that getCanonicalPath() needed a security permission on
Windows (but not on Solaris/Linux). In addition, the Derby engine had
to be shut down before derby.log could be deleted on Windows.
Fix contributed by Vemund Østgaard.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@594894 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3202-cda55ef7
|
DERBY-3202:
Workaround for failure in ErrorStreamTest on Windows Vista and Sun JDK 1.4.2.
Contributed by Vemund Østgaard.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@598626 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3208-4a868384
|
DERBY-3208: Callers of DaemonService.serviceNow() assume behaviour not
guaranteed by the interface
Expanded the javadoc comment in the interface so that it guarantees
what the callers implicitly assume.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@596262 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/services/daemon/DaemonService.java",
"hunks": [
{
"added": [
" /**",
" * Service this subscription ASAP. When this method is called, the",
" * subscriber's <code>performWork()</code> method is guaranteed to be",
" * invoked at some point in the future. However, there is no guarantee that",
" * a subscriber's <code>performWork()</code> is called the same number of",
" * times as the subscriber calls this method. More precisely, if a",
" * subscriber is waiting for this daemon service to invoke its",
" * <code>performWork()</code> method, the daemon service may, but is not",
" * required to, ignore requests from that subscriber until the",
" * <code>performWork()</code> method has been invoked.",
" *",
" * @param clientNumber the number that uniquely identifies the client",
" */"
],
"header": "@@ -87,12 +87,19 @@ public interface DaemonService",
"removed": [
"\t/**",
"\t Service this subscription ASAP. Does not guarantee that the daemon",
"\t\twill actually do anything about it.",
"",
"\t\t@param clientNumber the number that uniquely identify the client",
"\t */"
]
}
]
}
] |
derby-DERBY-3213-fb98ba49
|
DERBY-3213: SQLChar.trim method is unused and could be removed
Patch contributed by Deepthi Devaki A R
Patch file: DERBY-3213.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@627791 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/iapi/types/StringDataValue.java",
"hunks": [
{
"added": [],
"header": "@@ -109,21 +109,6 @@ public interface StringDataValue extends ConcatableDataValue",
"removed": [
"\t/**",
"\t * The SQL trim(), ltrim() and rtrim() functions.",
"\t *",
"\t * @param trimType\tType of trim",
"\t * @param result\tThe result of a previous call to this method,",
"\t *\t\t\t\t\tnull if not called yet.",
"\t *",
"\t * @return\tA StringDataValue containing the result of the trim()",
"\t *",
"\t * @exception StandardException\t\tThrown on error",
"\t */",
"\tpublic StringDataValue trim(",
"\t\t\t\tint trimType,",
"\t\t\t\tStringDataValue result)",
"\t\tthrows StandardException;"
]
}
]
}
] |
derby-DERBY-3214-87fd7022
|
DERBY-3214: Account for loss of precision that can occur when very
large cost estimates are added to, and then subtracted from, relatively
small cumulative estimates during Optimizable "pull" processing.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@603659 13f79535-47bb-0310-9956-ffa450edef68
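The arithmetic hazard reproduces in isolation; a self-contained illustration (the figures echo the comment added to OptimizerImpl):

    public class PrecisionLossSketch {
        public static void main(String[] args) {
            double accumulated = 1500.0;     // cost of the join order so far
            double pulled = 3.14e40;         // huge estimate for one optimizable
            accumulated += pulled;           // 1500.0 is absorbed: sum == 3.14e40
            accumulated -= pulled;           // "pull" the optimizable back out
            System.out.println(accumulated); // prints 0.0, not the original 1500.0
        }
    }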
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/OptimizerImpl.java",
"hunks": [
{
"added": [
"\t\t\t\t\t** loss of precision--but that should ONLY happen if the",
"\t\t\t\t\t** optimizable we just pulled was at position 0. If we",
"\t\t\t\t\t** have a newCost that is <= 0 at any other time, then",
"\t\t\t\t\t** it's the result of a different kind of precision loss--",
"\t\t\t\t\t** namely, the estimated cost of pullMe was so large that",
"\t\t\t\t\t** we lost the precision of the accumulated cost as it",
"\t\t\t\t\t** existed prior to pullMe. Then when we subtracted",
"\t\t\t\t\t** pullMe's cost out, we ended up setting newCost to zero.",
"\t\t\t\t\t** That's an unfortunate side effect of optimizer cost",
"\t\t\t\t\t** estimates that grow too large. If that's what happened",
"\t\t\t\t\t** here,try to make some sense of things by adding up costs",
"\t\t\t\t\t** as they existed prior to pullMe...",
"\t\t\t\t\tif (newCost <= 0.0)",
"\t\t\t\t\t{",
"\t\t\t\t\t\tif (joinPosition == 0)",
"\t\t\t\t\t\t\tnewCost = 0.0;",
"\t\t\t\t\t\telse",
"\t\t\t\t\t\t\tnewCost = recoverCostFromProposedJoinOrder();",
"\t\t\t\t\t}"
],
"header": "@@ -857,10 +857,26 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\t\t\t** loss of precision.",
"\t\t\t\t\tif (newCost < 0.0)",
"\t\t\t\t\t\tnewCost = 0.0;"
]
},
{
"added": [
"\t\t\t\t\t\t// See discussion above for \"newCost\"; same applies here.",
"\t\t\t\t\t\tif (prevEstimatedCost <= 0.0)",
"\t\t\t\t\t\t{",
"\t\t\t\t\t\t\tif (joinPosition == 0)",
"\t\t\t\t\t\t\t\tprevEstimatedCost = 0.0;",
"\t\t\t\t\t\t\telse",
"\t\t\t\t\t\t\t{",
"\t\t\t\t\t\t\t\tprevEstimatedCost =",
"\t\t\t\t\t\t\t\t\trecoverCostFromProposedJoinOrder();",
"\t\t\t\t\t\t\t}",
"\t\t\t\t\t\t}",
""
],
"header": "@@ -934,6 +950,18 @@ public class OptimizerImpl implements Optimizer",
"removed": []
},
{
"added": [
"\t/**",
"\t * Iterate through all optimizables in the current proposedJoinOrder",
"\t * and find the accumulated sum of their estimated costs. This method",
"\t * is used to 'recover' cost estimate sums that have been lost due to",
"\t * the addition/subtraction of the cost estimate for the Optimizable",
"\t * at position \"joinPosition\". Ex. If the total cost for Optimizables",
"\t * at positions < joinPosition is 1500, and then the Optimizable at",
"\t * joinPosition has an estimated cost of 3.14E40, adding those two",
"\t * numbers effectively \"loses\" the 1500. When we later subtract 3.14E40",
"\t * from the total cost estimate (as part of \"pull\" processing), we'll",
"\t * end up with 0 as the result--which is wrong. This method allows us",
"\t * to recover the \"1500\" that we lost in the process of adding and",
"\t * subtracting 3.14E40.",
"\t */",
"\tprivate double recoverCostFromProposedJoinOrder()",
"\t\tthrows StandardException",
"\t{",
"\t\tdouble recoveredCost = 0.0d;",
"\t\tfor (int i = 0; i < joinPosition; i++)",
"\t\t{",
"\t\t\trecoveredCost +=",
"\t\t\t\toptimizableList.getOptimizable(proposedJoinOrder[i])",
"\t\t\t\t\t.getBestAccessPath().getCostEstimate().getEstimatedCost();",
"\t\t}",
"",
"\t\treturn recoveredCost;",
"\t}",
""
],
"header": "@@ -1232,6 +1260,34 @@ public class OptimizerImpl implements Optimizer",
"removed": []
}
]
}
] |
derby-DERBY-3214-c741a20a
|
DERBY-3214: Follow-up patch to fix a bug in the first commit.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@604513 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/OptimizerImpl.java",
"hunks": [
{
"added": [
"\t\t\t\t\t\t\tnewCost = recoverCostFromProposedJoinOrder(false);"
],
"header": "@@ -875,7 +875,7 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\t\t\t\t\tnewCost = recoverCostFromProposedJoinOrder();"
]
},
{
"added": [
"\t\t\t\t\t\t\t\t\trecoverCostFromProposedJoinOrder(true);"
],
"header": "@@ -958,7 +958,7 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\t\t\t\t\t\t\t\t\trecoverCostFromProposedJoinOrder();"
]
},
{
"added": [
"\tprivate double recoverCostFromProposedJoinOrder(boolean sortAvoidance)",
"\t\t\tif (sortAvoidance)",
"\t\t\t{",
"\t\t\t\trecoveredCost +=",
"\t\t\t\t\toptimizableList.getOptimizable(proposedJoinOrder[i])",
"\t\t\t\t\t\t.getBestSortAvoidancePath().getCostEstimate()",
"\t\t\t\t\t\t\t.getEstimatedCost();",
"\t\t\t}",
"\t\t\telse",
"\t\t\t{",
"\t\t\t\trecoveredCost +=",
"\t\t\t\t\toptimizableList.getOptimizable(proposedJoinOrder[i])",
"\t\t\t\t\t\t.getBestAccessPath().getCostEstimate()",
"\t\t\t\t\t\t\t.getEstimatedCost();",
"\t\t\t}"
],
"header": "@@ -1274,15 +1274,26 @@ public class OptimizerImpl implements Optimizer",
"removed": [
"\tprivate double recoverCostFromProposedJoinOrder()",
"\t\t\trecoveredCost +=",
"\t\t\t\toptimizableList.getOptimizable(proposedJoinOrder[i])",
"\t\t\t\t\t.getBestAccessPath().getCostEstimate().getEstimatedCost();"
]
}
]
}
] |
derby-DERBY-3215-90daaabd
|
DERBY-3215 Potential NullPointerException in CachedPage class
Patch DERBY-3215b; code cleanup follow-up, no semantic change.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@597123 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/raw/data/CachedPage.java",
"hunks": [
{
"added": [],
"header": "@@ -23,22 +23,17 @@ package org.apache.derby.impl.store.raw.data;",
"removed": [
"import org.apache.derby.impl.store.raw.data.BasePage;",
"",
"import org.apache.derby.iapi.store.raw.ContainerHandle;",
"import org.apache.derby.iapi.services.context.ContextService;",
"import org.apache.derby.iapi.services.io.StoredFormatIds;"
]
},
{
"added": [
"\t\tif (myContainer == null)",
"\t\t{",
"\t\t\tStandardException nested =",
"\t\t\t\tStandardException.newException(",
"\t\t\t\t\tSQLState.DATA_CONTAINER_VANISHED,",
"\t\t\t\t\tidentity.getContainerId());",
"\t\t\tthrow dataFactory.markCorrupt(",
"\t\t\t\tStandardException.newException(",
"\t\t\t\t\tSQLState.FILE_WRITE_PAGE_EXCEPTION, nested,",
"\t\t\t\t\tidentity));",
"\t\t}",
"",
"\t\ttry",
"\t\t{",
"\t\t\tmyContainer.writePage(",
"\t\t\t\tidentity.getPageNumber(), pageData, syncMe);",
"",
"\t\t\t//",
"\t\t\t// Do some in memory unlogged bookkeeping tasks while we have",
"\t\t\t// the container.",
"\t\t\t//",
"",
"\t\t\tif (!isOverflowPage() && isDirty())",
"\t\t\t{",
"",
"\t\t\t\t// let the container knows whether this page is a not",
"\t\t\t\t// filled, non-overflow page",
"\t\t\t\tmyContainer.trackUnfilledPage(",
"\t\t\t\t\tidentity.getPageNumber(), unfilled());",
"\t\t\t\t// if this is not an overflow page, see if the page's row",
"\t\t\t\t// count has changed since it come into the cache.",
"\t\t\t\t// if the page is not invalid, row count is 0.\tOtherwise,",
"\t\t\t\t// count non-deleted records on page.",
"\t\t\t\t// Cannot call nonDeletedRecordCount because the page is",
"\t\t\t\t// unlatched now even though nobody is changing it",
"\t\t\t\tint currentRowCount = internalNonDeletedRecordCount();",
"\t\t\t\tif (currentRowCount != initialRowCount)",
"\t\t\t\t\tmyContainer.updateEstimatedRowCount(",
"\t\t\t\t\t\tcurrentRowCount - initialRowCount);",
"\t\t\t\t\tsetContainerRowCount(",
"\t\t\t\t\t\tmyContainer.getEstimatedRowCount(0));",
"\t\t\t\t\tinitialRowCount = currentRowCount;",
"",
"\t\t}",
"\t\tcatch (IOException ioe)",
"\t\t\t// page cannot be written",
"\t\t\tthrow StandardException.newException(",
"\t\t\t\tSQLState.FILE_WRITE_PAGE_EXCEPTION,",
"\t\t\t\tioe, identity);",
"\t\t}",
"\t\tfinally",
"\t\t{",
"\t\t\tcontainerCache.release(myContainer);",
"\t\t\tmyContainer = null;"
],
"header": "@@ -768,72 +763,70 @@ public abstract class CachedPage extends BasePage implements Cacheable",
"removed": [
"\t\tif (myContainer != null) ",
" {",
"\t\t\ttry ",
" {",
"\t\t\t\tmyContainer.writePage(",
" identity.getPageNumber(), pageData, syncMe);",
"\t\t\t\t// Do some in memory unlogged bookkeeping tasks while we have",
"\t\t\t\t// the container. ",
"\t\t\t\tif (!isOverflowPage() && isDirty())",
"\t\t\t\t\t// let the container knows whether this page is a not ",
" // filled, non-overflow page",
"\t\t\t\t\tmyContainer.trackUnfilledPage(",
" identity.getPageNumber(), unfilled());",
"",
"\t\t\t\t\t// if this is not an overflow page, see if the page's row",
"\t\t\t\t\t// count has changed since it come into the cache.",
"\t\t\t\t\t//",
"\t\t\t\t\t// if the page is not invalid, row count is 0. Otherwise,",
"\t\t\t\t\t// count non-deleted records on page.",
"\t\t\t\t\t//",
"\t\t\t\t\t// Cannot call nonDeletedRecordCount because the page is",
"\t\t\t\t\t// unlatched now even though nobody is changing it",
"\t\t\t\t\tint currentRowCount = internalNonDeletedRecordCount();",
"",
"\t\t\t\t\tif (currentRowCount != initialRowCount)",
"\t\t\t\t\t{",
"\t\t\t\t\t\tmyContainer.updateEstimatedRowCount(",
" currentRowCount - initialRowCount);",
"",
"\t\t\t\t\t\tsetContainerRowCount(",
" myContainer.getEstimatedRowCount(0));",
"\t\t\t\t\t\tinitialRowCount = currentRowCount;",
"\t\t\t\t\t}",
"",
"\t\t\t} ",
" catch (IOException ioe) ",
" {",
"\t\t\t\t// page cannot be written",
"\t\t\t\tthrow StandardException.newException(",
" SQLState.FILE_WRITE_PAGE_EXCEPTION, ",
" ioe, identity);",
"\t\t\tfinally",
"\t\t\t{",
"\t\t\t\tcontainerCache.release(myContainer);",
"\t\t\t\tmyContainer = null;",
"\t\t\t}",
"\t\t} ",
"\t\telse",
"\t\t\tStandardException nested = ",
" StandardException.newException(",
" SQLState.DATA_CONTAINER_VANISHED, ",
" identity.getContainerId());",
"\t\t\tthrow dataFactory.markCorrupt(",
" StandardException.newException(",
" SQLState.FILE_WRITE_PAGE_EXCEPTION, nested, ",
" identity));"
]
}
]
}
] |
derby-DERBY-3216-63bc195f
|
DERBY-3216
If the btree post-commit thread cannot get a table-level lock for row cleanup and
possible tree merging, then instead attempt just row-level row purging. Without
this change some stress tests saw the post-commit queue grow continuously,
as there was always some thread with a lock on the index, and thus the items
could never get executed.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@597865 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/access/btree/BTreePostCommit.java",
"hunks": [
{
"added": [
"import org.apache.derby.iapi.store.access.RowUtil;",
""
],
"header": "@@ -30,7 +30,9 @@ import org.apache.derby.iapi.store.access.AccessFactory;",
"removed": []
},
{
"added": [
" /**",
" * Open index for either table level or row level update.",
" * <p>",
" * @param lock_level For table level use TransactionManager.MODE_TABLE,",
" * for row level use TransactionManager.MODE_RECORD",
" * @param lock_mode For table level use LockingPolicy.MODE_CONTAINER,",
" * for row level use LockingPolicy.MODE_RECORD",
" *",
" * @exception StandardException Standard exception policy.",
" **/",
" private final OpenBTree openIndex(",
" TransactionManager internal_xact,",
" int lock_level,",
" int lock_mode)",
" throws StandardException",
" {",
" OpenBTree open_btree = new OpenBTree();",
"",
" ConglomerateController base_cc = ",
" btree.lockTable(",
" internal_xact, ",
" (ContainerHandle.MODE_FORUPDATE |",
" ContainerHandle.MODE_LOCK_NOWAIT), ",
" lock_level,",
" TransactionController.ISOLATION_REPEATABLE_READ);",
"",
" open_btree.init(",
" (TransactionManager) null, ",
" internal_xact, ",
" (ContainerHandle) null, // open the container ",
" internal_xact.getRawStoreXact(),",
" false,",
" (ContainerHandle.MODE_FORUPDATE | ContainerHandle.MODE_LOCK_NOWAIT),",
" lock_level,",
" btree.getBtreeLockingPolicy(",
" internal_xact.getRawStoreXact(),",
" lock_level,",
" lock_mode,",
" TransactionController.ISOLATION_REPEATABLE_READ, ",
" base_cc,",
" open_btree),",
" btree, ",
" (LogicalUndo) null, // No logical undo necessry.",
" (DynamicCompiledOpenConglomInfo) null);",
"",
" return(open_btree);",
" }",
""
],
"header": "@@ -131,6 +133,54 @@ class BTreePostCommit implements Serviceable",
"removed": []
},
{
"added": [
" OpenBTree open_btree = null;",
" // First attempt to get a table lock on the btree. This lock is",
" // requested NOWAIT to not impede normal operation on the table.",
" // If the lock were to wait then the current lock manager livelock ",
" // algorithm would block all subsequent lock requests on this ",
" // btree even if they are compatible with the current holder of ",
" // the lock.",
" // If this lock is granted then:",
" // 1) deleted rows on the page can automatically be purged as",
" // they must be committed, otherwise lock would not have been",
" // granted.",
" // 2) if all rows from page are reclaimed then a structure shrink",
" // which requires table level lock can be executed.",
" //",
" open_btree = ",
" openIndex(",
" TransactionController.MODE_TABLE, ",
" LockingPolicy.MODE_CONTAINER);"
],
"header": "@@ -165,51 +215,31 @@ class BTreePostCommit implements Serviceable",
"removed": [
" OpenBTree open_btree = new OpenBTree();",
" // The current space reclamation algorithm requires a table level",
" // lock on the btree - this is mostly because the shrink algorithm",
" // is not multi-user. This lock is requested NOWAIT as it does",
" // not want to impedede normal operation on the table. If the lock",
" // were to wait then the current lock manager livelock algorithm ",
" // would block all subsequent lock requests on this btree even if",
" // they are compatible with the current holder of the lock.",
" // ",
" // There are currently 3 outstanding enhancement requests:",
" // track 4237 - retry the work intelligently",
" // track 4238 - if can't get table lock, at least reclaim the rows",
" // track 4239 - do row level lock shrink - very hard to do.",
" ConglomerateController base_cc = ",
" btree.lockTable(",
" (ContainerHandle.MODE_FORUPDATE |",
" ContainerHandle.MODE_LOCK_NOWAIT), ",
" TransactionController.MODE_TABLE,",
" TransactionController.ISOLATION_REPEATABLE_READ);",
"",
" open_btree.init(",
" (TransactionManager) null, ",
" internal_xact, ",
" (ContainerHandle) null, // open the container ",
" internal_xact.getRawStoreXact(),",
" false,",
" ContainerHandle.MODE_FORUPDATE,",
" TransactionController.MODE_TABLE,",
" btree.getBtreeLockingPolicy(",
" internal_xact.getRawStoreXact(),",
" TransactionController.MODE_TABLE,",
" LockingPolicy.MODE_CONTAINER,",
" TransactionController.ISOLATION_REPEATABLE_READ, ",
" base_cc,",
" open_btree),",
" btree, ",
" (LogicalUndo) null, // No logical undo necessry.",
" (DynamicCompiledOpenConglomInfo) null);"
]
},
{
"added": [
" // 2 kinds of errors here expected here. Either container not ",
" // found or could not obtain lock (LOCK_TIMEOUT or DEADLOCK).",
" //",
" // Could not get exclusive table lock, so try row level",
" // reclaim of just the rows on this page. No merge is ",
" // attempted.",
" try",
" {",
" open_btree = ",
" openIndex(",
" internal_xact, ",
" TransactionController.MODE_RECORD, ",
" LockingPolicy.MODE_RECORD);",
"",
" purgeRowLevelCommittedDeletes(open_btree);",
" open_btree.close();",
"",
" }",
" catch (StandardException se2)",
" {",
" if (se2.getMessageId().equals(SQLState.LOCK_TIMEOUT) ||",
" se2.getMessageId().equals(SQLState.DEADLOCK))",
" {",
" // Could not get intended exclusive table lock, so ",
" // requeue and hope other user gives up table level",
" // lock soon. This should not be normal case.",
" requeue_work = true;",
" }",
" }",
" }"
],
"header": "@@ -222,29 +252,46 @@ class BTreePostCommit implements Serviceable",
"removed": [
"",
"\t\t\t",
" //2 kinds of errors here expected here. Either container not found or dead lock. ",
"\t\t\t//If it is a locking error, work is requeued. (4237)",
"\t\t ",
"\t\t\t\trequeue_work = true;",
"\t\t\t}",
"\t\t\t//RESSOLVE-mike (4238) If you can't get a table level lock for btree space recovery in ",
"\t\t\t//the post commit thread, maybe you should at least reclaim the ",
"\t\t\t//rows on the page while you are at it. Use the same algorithm ",
"\t\t\t//as exists in BTreeController.java. row level shrink is still a ",
"\t\t\t//big problem and a separate track exists for it.",
"\t\t\t"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/store/access/btree/BTreeScan.java",
"hunks": [
{
"added": [
" if (SanityManager.DEBUG) ",
" {"
],
"header": "@@ -1366,7 +1366,8 @@ public abstract class BTreeScan extends OpenBTree implements ScanManager",
"removed": [
" if (SanityManager.DEBUG) {"
]
},
{
"added": [
" scan_position.current_slot)) ",
" {",
" } ",
" else ",
" {"
],
"header": "@@ -1377,9 +1378,12 @@ public abstract class BTreeScan extends OpenBTree implements ScanManager",
"removed": [
" scan_position.current_slot)) {",
" } else {"
]
},
{
"added": [
" \t // Do not reclaim the root page of the btree if there are no ",
" // children since we were doing too many post commit actions in a ",
" // benchmark which does an insert/commit/delete/commit operations ",
" // in a single user system. Now with this change the work will ",
" // move to the user thread which does the insert and finds no space",
" // on the root page. In that case it will try a split, which ",
" // automatically first checks if there is committed deleted space",
" // that can be reclaimed.",
" !(scan_position.current_leaf.getIsRoot() && ",
" scan_position.current_leaf.getLevel() == 0)) "
],
"header": "@@ -1387,16 +1391,18 @@ public abstract class BTreeScan extends OpenBTree implements ScanManager",
"removed": [
" \t // Beetle 5750: we do not reclaim the root page of the btree if ",
" // there are no children since we were",
" \t // doing too many post commit actions in a benchmark which does an",
" \t // insert/commit/delete/commit operations in a single user system. now ,",
" \t // with this change the work will move to the user",
" \t // thread which does the insert ",
"\t\t!(scan_position.current_leaf.getIsRoot() && ",
"\t\t scan_position.current_leaf.getLevel() == 0 )) "
]
}
]
}
] |
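Note on DERBY-3216 above: a minimal, hypothetical Java sketch of the fallback pattern this commit adds to BTreePostCommit — try the table-level lock NOWAIT, and if it cannot be granted, fall back to row-level purging. It uses java.util.concurrent locks in place of Derby's lock manager; all names are illustrative, not the committed code.

import java.util.concurrent.locks.ReentrantLock;

public class NoWaitFallbackSketch {
    private final ReentrantLock tableLock = new ReentrantLock();

    // Analogue of the post-commit work: prefer a table-level lock
    // (allows purging plus structure shrink); if it cannot be granted
    // without waiting, do row-level purging only.
    public boolean reclaimSpace() {
        if (tableLock.tryLock()) {          // NOWAIT table lock
            try {
                purgeCommittedDeletes();    // may also merge/shrink the tree
                return true;
            } finally {
                tableLock.unlock();
            }
        }
        // Table lock unavailable: purge rows under row-level locking,
        // no structure modification attempted.
        return purgeRowLevelCommittedDeletes();
    }

    private void purgeCommittedDeletes() { /* table-level cleanup */ }

    private boolean purgeRowLevelCommittedDeletes() {
        // In the real code this can still fail with LOCK_TIMEOUT/DEADLOCK,
        // in which case the work is requeued.
        return true;
    }

    public static void main(String[] args) {
        System.out.println(new NoWaitFallbackSketch().reclaimSpace());
    }
}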
derby-DERBY-3219-a840fa50
|
DERBY-3219: GROUP BY query fails with ERROR XSDA7
The underlying cause of this problem is that the externalized data format
for a MaxMinAggregator instance includes the max (or min) value that the
aggregator is processing, and this data value happens to be embedded
*inside* of the overall externalized data format. However, the externalized
format for a SQLChar-based data value can use a "stream" format, in which
the explicit length of the value is not encoded, and rather the value is
read until an EOF is received, which means that such a value always has
to be the *last* value in the particular stream, and cannot be embedded
inside of a larger data structure.
In the case in question, the value was a string of length 0, which when
externalized looks like a streamed value, but can be distinguished
because the EOF exception occurs before any data has been read. But when
the value is included inside the larger MaxMinAggregator value, the EOF
exception does *not* occur immediately, but rather after the code in
SQLChar.readExternal has read beyond its own section, and has erroneously
consumed the other values from the MaxMinAggregator's external data.
The solution is to re-order the external format of the MaxMinAggregator,
such that the data value is the last item in the external data, by
calling super.writeExternal and super.readExternal *after* processing
the other MaxMinAggregator data. Since MaxMinAggregator instances are
never stored persistently in permanent data structures, but only in
temporary data structures such as overflow tables and sort buffers, this
should not cause any compatibility problems.
An alternative implementation, which involved changing SQLChar.writeExternal
to use an explicitly-delimited external format for a string of length
zero, rather than the streaming-until-EOF format, was rejected because it
would have increased the on-disk size of such values, and because it
could have caused compatibility problems by changing the on-disk format
of existing values.
The unit test for this bug fix involves the use of some SanityManager code
in the sorter, and hence is only effective when run in a sane debug build.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@661204 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/MaxMinAggregator.java",
"hunks": [
{
"added": [
"\t// Formatable implementations usually invoke the super()",
"\t// version of readExternal or writeExternal first, then",
"\t// do the additional actions here. However, since the",
"\t// superclass of this class requires that its externalized",
"\t// data must be the last data in the external stream, we",
"\t// invoke the superclass's read/writeExternal method",
"\t// last, not first. See DERBY-3219 for more discussion.",
"\t\tsuper.writeExternal(out);"
],
"header": "@@ -88,11 +88,18 @@ public final class MaxMinAggregator",
"removed": [
"\t\tsuper.writeExternal(out);"
]
}
]
}
] |
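Note on DERBY-3219 above: a self-contained sketch (hypothetical class and field names; Java 9+ for readAllBytes) of the ordering rule the fix relies on — any value whose serialized form is delimited only by EOF must be the last thing written, so the fixed-width fields go first.

import java.io.*;

public class WriteValueLastSketch {
    // Analogue of the reordered writeExternal(): own fixed-width fields
    // first, then the value that can only be read until EOF (like an
    // empty SQLChar) strictly last.
    static byte[] serialize(boolean isMax, byte[] eofDelimitedValue) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        out.writeBoolean(isMax);        // own fields first ...
        out.write(eofDelimitedValue);   // ... EOF-terminated value last
        out.flush();
        return bos.toByteArray();
    }

    static void deserialize(byte[] data) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
        boolean isMax = in.readBoolean();
        // Reading "until EOF" is now safe: nothing follows the value.
        byte[] value = in.readAllBytes();
        System.out.println("isMax=" + isMax + ", valueLength=" + value.length);
    }

    public static void main(String[] args) throws IOException {
        deserialize(serialize(true, new byte[0])); // zero-length value, as in the bug
    }
}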
derby-DERBY-3220-925ae992
|
DERBY-3220 <transition table or variable list> (ReferencingClause) in CREATE TRIGGER statement not SQL compliant
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@597604 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3221-2a31ed91
|
DERBY-3221: "java.sql.SQLException: The conglomerate (-5) requested does not exist." from Derby 10.3.1.4 embedded within Eclipse 3.3 and RAD 7.0
Patch file: derby-3221.v3.diff
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@613116 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/TemporaryRowHolderImpl.java",
"hunks": [
{
"added": [
"\tprivate\tlong\t\t\t\t CID;"
],
"header": "@@ -67,7 +67,7 @@ class TemporaryRowHolderImpl implements TemporaryRowHolder",
"removed": [
"\tprotected\tlong\t\t\t\tCID;"
]
},
{
"added": [
" if (!isUniqueStream) {",
" }"
],
"header": "@@ -238,8 +238,9 @@ class TemporaryRowHolderImpl implements TemporaryRowHolder",
"removed": [
"\t\t\tif(!isUniqueStream)"
]
},
{
"added": [
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(lastArraySlot == -1);",
" SanityManager.ASSERT(state == STATE_UNINIT);",
" SanityManager.ASSERT(!conglomCreated);",
" SanityManager.ASSERT(CID == 0);",
" }",
"\t\tnumRowsIn = 0;",
" /**",
" * Accessor to get the id of the temporary conglomerate. Temporary ",
" * conglomerates have negative ids. An id equal to zero means that no ",
" * temporary conglomerate has been created.",
" * @return Conglomerate ID of temporary conglomerate",
" */",
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(CID == 0 && !conglomCreated || ",
" CID < 0 && conglomCreated);",
" }"
],
"header": "@@ -490,32 +491,32 @@ class TemporaryRowHolderImpl implements TemporaryRowHolder",
"removed": [
"",
"\t\tlastArraySlot = -1;",
"\t\tnumRowsIn = 0;",
"\t\tstate = STATE_UNINIT;",
"\t\t/*",
"\t\t** We are not expecting this to be called",
"\t\t** when we have a temporary conglomerate",
"\t\t** but just to be on the safe side, drop",
"\t\t** it. We'd like do something cheaper,",
"\t\t** but there is no truncate on congloms.",
"\t\t*/",
"\t\tif (conglomCreated)",
"\t\t{",
"\t\t\tTransactionController tc = activation.getTransactionController();",
"\t\t\ttc.dropConglomerate(CID);",
"\t\t\tconglomCreated = false;",
"\t\t}"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/execute/TemporaryRowHolderResultSet.java",
"hunks": [
{
"added": [
" if (SanityManager.DEBUG) {",
" SanityManager.ASSERT(currentConglomId == holder.getTemporaryConglomId(),",
" \"currentConglomId(\" + currentConglomId + ",
" \") == holder.getTemporaryConglomeateId (\" + ",
" holder.getTemporaryConglomId() + \")\");",
" }"
],
"header": "@@ -151,7 +151,12 @@ class TemporaryRowHolderResultSet implements CursorResultSet, NoPutResultSet, Cl",
"removed": [
"\t\t\tholder.CID = currentConglomId;"
]
},
{
"added": [
"\t\tif (holder.getTemporaryConglomId() == 0)"
],
"header": "@@ -273,7 +278,7 @@ class TemporaryRowHolderResultSet implements CursorResultSet, NoPutResultSet, Cl",
"removed": [
"\t\tif (holder.CID == 0)"
]
},
{
"added": [
" holder.getTemporaryConglomId(),"
],
"header": "@@ -285,7 +290,7 @@ class TemporaryRowHolderResultSet implements CursorResultSet, NoPutResultSet, Cl",
"removed": [
" holder.CID,"
]
},
{
"added": [
" heapCC = tc.openConglomerate(holder.getTemporaryConglomId(),"
],
"header": "@@ -338,7 +343,7 @@ class TemporaryRowHolderResultSet implements CursorResultSet, NoPutResultSet, Cl",
"removed": [
"\t\t\t\theapCC = tc.openConglomerate( holder.CID,"
]
},
{
"added": [
" if (holder.getTemporaryConglomId() == 0)",
"\t\t\theapCC = tc.openConglomerate( holder.getTemporaryConglomId(),"
],
"header": "@@ -358,11 +363,11 @@ class TemporaryRowHolderResultSet implements CursorResultSet, NoPutResultSet, Cl",
"removed": [
"\t\tif(holder.CID ==0)",
"\t\t\theapCC = tc.openConglomerate( holder.CID,"
]
}
]
}
] |
derby-DERBY-3222-a30f4ad8
|
Merging change (599933) from the 10.3 codeline into trunk for DERBY-3222. The commit comment for the checkin into
the 10.3 codeline was as follows:
The test will fail depending on the timing. It inserts CURRENT_DATE into a column and then compares that
column with whatever the CURRENT_DATE might be during the run of the compare SQL. If the insert happens right before
midnight and the comparison happens right after midnight, the comparison of the dates will return FALSE. Since the main
purpose of the test is to make sure that Derby derives the correct collation type for CURRENT_DATE and
UPPER(CURRENT_DATE), it really does not matter whether the 2 current dates actually match. Because of this, I
have changed the test to look for <= when doing the date comparison rather than =. This should resolve the
timing-related aspect of the test.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@599973 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
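Note on DERBY-3222 above: a small illustrative sketch, using java.time rather than SQL, of why the equality comparison is timing-sensitive while <= is not; the dates are invented.

import java.time.LocalDate;

public class MidnightCompareSketch {
    public static void main(String[] args) {
        // The inserted value was captured just before midnight; the
        // comparison runs just after. Equality fails, but <= still holds,
        // which is all the collation test needs.
        LocalDate inserted = LocalDate.of(2007, 11, 30);   // hypothetical run
        LocalDate current  = inserted.plusDays(1);         // clock rolled over
        System.out.println("= : " + inserted.equals(current));    // false
        System.out.println("<=: " + !inserted.isAfter(current));  // true
    }
}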
derby-DERBY-3229-4aa78691
|
DERBY-3229: Make SysinfoLocaleTest print more information on failure
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@598456 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-323-a4aa5a7e
|
DERBY-323: Locale problem in test of derby
Description of the patch:
1) Rewrite tools/derbyrunjartest.java to use jvm.getCommandLine() to
get the command line to start the test with.
2) Move the setting of user.language and user.country from RunTest
and NetServer to jvm.getCommandLine(). This way, all of the java
processes that are forked from the test harness or from a test
get the same locale.
3) Add Locale.setDefault(Locale.US) to RunTest.main(). This fixes
the issue with the nist tests.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@424072 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3230-fca5be58
|
DERBY-3230 Selecting data from a Table raises Error XN008: Query processing has been terminated due to an error on the server
The solution was to move the retrieval of all of the data associated with the QRYDTA to before the ResultSet is marked as closed on the server.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@612849 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/client/org/apache/derby/client/net/NetCursor.java",
"hunks": [
{
"added": [
" // If we don't have at least one byte in the buffer for the DA null indicator,",
" // then we need to send a CNTQRY request to fetch the next block of data.",
" // Read the DA null indicator. Do this before we close mark the statement",
" // closed on the server. DERBY-3230",
" daNullIndicator = readFdocaOneByte();",
" "
],
"header": "@@ -146,7 +146,12 @@ public class NetCursor extends org.apache.derby.client.am.Cursor {",
"removed": [
""
]
}
]
}
] |
derby-DERBY-3231-71b56308
|
DERBY-3231: Sorting on COUNT with OR and GROUP BY delivers wrong results.
Patch contributed by: Manish Khettry.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@603954 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3233-59678316
|
DERBY-3233 ComparisonFailure in derbyStress
Change test to close connection to ensure all result sets are closed before dropping the table.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@614277 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3238-75d1fb19
|
DERBY-3238 minor cleanup in TriggerTest to remove getConnection() calls
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@601118 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3238-9696c22c
|
DERBY-3238 Add test cases when the lob columns are referenced and referenced multiple times.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@604042 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3238-d2d2e684
|
DERBY-3238 When a table contains large LOB values (> ~32K), trigger execution fails for that row with ERROR XCL30: An IOException was thrown when reading a 'BLOB'
When a trigger is present, unchanged columns will be part of the UpdateResultSet. These columns are present twice in the result set, as a before value and an after value. There was code to "objectify" the before value, but the after value was not getting updated. Changed objectifyStream to make sure after values point to the data value, and not to the stream, once objectifyStream has run.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@601110 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/execute/DMLWriteResultSet.java",
"hunks": [
{
"added": [
"import java.io.InputStream;",
""
],
"header": "@@ -21,6 +21,8 @@",
"removed": []
}
]
}
] |
derby-DERBY-3242-ad72a2b5
|
DERBY-3242: ij doesn't understand bracketed comments
Fix contributed by James F. Adams.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@604871 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/tools/org/apache/derby/impl/tools/ij/StatementFinder.java",
"hunks": [
{
"added": [
" Comments currently recognized include the SQL comment,",
" which begins with \"--\" and ends at the next EOL, and nested",
" bracketed comments."
],
"header": "@@ -36,8 +36,9 @@ import java.io.Reader;",
"removed": [
"\tThe only comment form currently recognized is the SQL comment,",
"\twhich begins with \"--\" and ends at the next EOL."
]
},
{
"added": [
"\tprivate static final char SLASH = '/';",
"\tprivate static final char ASTERISK = '*';"
],
"header": "@@ -78,6 +79,8 @@ public class StatementFinder {",
"removed": []
},
{
"added": [
"\t\t\t\tcase SLASH:",
"\t\t\t\t readBracketedComment();",
"\t\t\t\t break;"
],
"header": "@@ -181,6 +184,9 @@ public class StatementFinder {",
"removed": []
}
]
}
] |
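Note on DERBY-3242 above: an illustrative standalone sketch of nested bracketed-comment skipping, the behavior StatementFinder gains here. This is not the committed readBracketedComment() implementation, just a minimal depth-counting version.

public class BracketedCommentSketch {
    // Skip a nested /* ... */ comment starting at position 'start'
    // (which must point at the opening '/'); returns the index just
    // past the matching "*/", or text.length() if unterminated.
    static int skipBracketedComment(String text, int start) {
        int depth = 0;
        int i = start;
        while (i < text.length() - 1) {
            if (text.charAt(i) == '/' && text.charAt(i + 1) == '*') {
                depth++; i += 2;
            } else if (text.charAt(i) == '*' && text.charAt(i + 1) == '/') {
                depth--; i += 2;
                if (depth == 0) return i;
            } else {
                i++;
            }
        }
        return text.length();
    }

    public static void main(String[] args) {
        String sql = "/* outer /* inner */ still outer */ SELECT 1";
        System.out.println(sql.substring(skipBracketedComment(sql, 0)).trim());
        // prints: SELECT 1
    }
}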
derby-DERBY-3243-212a1264
|
DERBY-3243 (jdbc net client) exception during normal iteration through "ResultSet" of "select * from t"
Allow locators > 32K. Exclude the extended-length values 0x800x so that LOB-by-value will work with older versions. Exclude negative and zero locators.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@618431 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java",
"hunks": [
{
"added": [
"\t* Return the current locator value/",
" * 0x800x values are not valid values as they are used to indicate the BLOB ",
" * is being sent by value, so we skip those values (DERBY-3243)",
" * ",
" int newKey = ++rootConnection.lobHMKey;",
" // Skip 0x8000, 0x8002, 0x8004, 0x8006, for DERBY-3243",
" // Earlier versions of the Derby Network Server (<10.3) didn't",
" // support locators and would send an extended length field",
" // with one of the above mentioned values instead of a",
" // locator, even when locators were requested. To enable the",
" // client driver to detect that locators aren't supported,",
" // we don't use any of them as locator values.",
" if (newKey == 0x8000 || newKey == 0x8002 || newKey == 0x8004 ||",
" newKey == 0x8006 || newKey == 0x8008)",
" newKey = ++rootConnection.lobHMKey;",
" // Also roll over when the high bit of four byte locator is set.",
" // This will prevent us from sending a negative locator to the",
" // client. Don't allow zero since it is not a valid locator for the ",
" // client.",
" if (newKey == 0x80000000 || newKey == 0)",
" newKey = rootConnection.lobHMKey = 1;",
" return newKey;"
],
"header": "@@ -2680,11 +2680,31 @@ public abstract class EmbedConnection implements EngineConnection",
"removed": [
"\t* Return the current locator value",
"\t\treturn ++rootConnection.lobHMKey;"
]
}
]
}
] |
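Note on DERBY-3243 above: the locator-generation rules from the EmbedConnection hunk, repackaged as a runnable standalone class for clarity; the class name and the driver in main are invented.

public class LocatorSketch {
    private int lobHMKey = 0;

    // Skip 0x8000..0x8008 (even values), which older servers used as
    // extended-length markers; never return 0 or a negative value.
    int getIncLOBKey() {
        int newKey = ++lobHMKey;
        if (newKey == 0x8000 || newKey == 0x8002 || newKey == 0x8004
                || newKey == 0x8006 || newKey == 0x8008) {
            newKey = ++lobHMKey;
        }
        if (newKey == 0x80000000 || newKey == 0) {
            newKey = lobHMKey = 1; // roll over; zero is not a valid locator
        }
        return newKey;
    }

    public static void main(String[] args) {
        LocatorSketch s = new LocatorSketch();
        s.lobHMKey = 0x7FFF;                 // next increment lands on 0x8000
        System.out.println(Integer.toHexString(s.getIncLOBKey())); // 8001
        s.lobHMKey = 0xFFFFFFFF;             // i.e. -1; next increment is 0
        System.out.println(s.getIncLOBKey()); // 1
    }
}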
derby-DERBY-3243-66fe22b0
|
DERBY-3243 (jdbc net client) exception during normal iteration through "ResultSet" of "select * from t"
Add test retrieving more than 32K lobs.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@630036 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-3244-5faf131d
|
DERBY-3244
In the case of a wait for a latch while traveling left at leaf level, and
a subsequent wait for either a lock or another latch while looking for row
to lock then one path through the code would get a null pointer. The code
was trying to release a latch that had already been released and was tracked
by "current_leaf == null".
I could not get this to fail in my environment, but did force it by code
inspection and changing the path through the code by hand to mimic latch
waits. My assumption is that intermittently on some platforms this single
threaded test is competing for these latches with background deleted row
cleaner thread, probably on a fast multiple processor machine.
-
M java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java
M java/engine/org/apache/derby/impl/store/access/btree/BTreeLockingPolicy.java
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@603375 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/store/access/btree/index/B2IRowLocking3.java",
"hunks": [
{
"added": [
" /**",
" * move left in btree and lock previous key.",
" * <p>",
" * Enter routine with \"current_leaf\" latched. This routine implements",
" * the left travel ladder locking protocol to search the leaf pages from",
" * right to left for the previous key to 1st key on current_leaf.",
" *",
" * There are 2 cases:",
" * 1) the previous page has keys, in which case the last key on that",
" * page is locked, other wise search continues on the next page to",
" * the left.",
" * 2) there are no keys on the current page and there is no page to the",
" * left. In this case the special \"leftmost key\" lock is gotten by",
" * calling lockPreviousToFirstKey().",
" *",
" * Left laddar locking is used if all latches can be obtained immediately",
" * with NOWAIT. This means that current latch is held while asking for",
" * left latch NOWAIT, and if left latch is granted then subsequently ",
" * current latch can be released. If this protocol is followed and ",
" * all latches are granted then caller is guaranteed that the correct",
" * previous key has been locked and current_page latch remains. The",
" * NOWAIT protocol is used to avoid latch/latch deadlocks. The overall",
" * protocol is that one never holds a latch while waiting on another unless",
" * the direction of travel is down and to the right.",
" * <p>",
" * If along the search a latch has to be waited on then latches are",
" * released and a wait is performed, and \"false\" status is returned to",
" * caller. In this case the routine can no longer be sure of it's current",
" * position and may have to retry the whole operation.",
" *",
" * @return true if previous key found without ever waiting on a latch, ",
" * false if latch released in order to wait for other latch.",
" *",
" * @exception StandardException Standard exception policy.",
" **/"
],
"header": "@@ -341,6 +341,41 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": []
},
{
"added": [
" // initial latch request on leaf left of current could not be",
" // granted NOWAIT.",
"",
" // wait on the left leaf, which we could not be granted NOWAIT."
],
"header": "@@ -364,18 +399,18 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": [
"",
" // error going from mainpage to first left page. Release ",
" // current page latch and continue the search.",
" // wait on the left page, which we could not get before. "
]
},
{
"added": [
""
],
"header": "@@ -391,6 +426,7 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": []
},
{
"added": [
" // needed to wait on a row lock, so both prev_leaf and",
" // current_leaf latches have been released by ",
" // lockRowOnPage()"
],
"header": "@@ -409,6 +445,9 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": []
},
{
"added": [
" // needed to wait on a row lock, so both prev_leaf and",
" // current_leaf latches have been released by ",
" // lockPreviousToFirstKey()",
""
],
"header": "@@ -427,6 +466,10 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": []
},
{
"added": [
" // the search. Do left ladder locking as you walk left.",
""
],
"header": "@@ -440,14 +483,14 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": [
" // the search. Do left ladder locking as you walk left, ",
" // but be ready to release l"
]
},
{
"added": [
" if (current_leaf != null)",
" {",
" // current_leaf may have already been released as part of",
" // previous calls, need to check null status.",
" current_leaf.release();",
" current_leaf = null;",
" }",
"",
" // can only get here by above getLeftSibling() call so prev_leaf",
" // should always be valid and latched at this point. No null",
" // check necessary."
],
"header": "@@ -455,8 +498,17 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": [
" current_leaf.release();",
" current_leaf = null;"
]
},
{
"added": [
""
],
"header": "@@ -467,11 +519,11 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": [
""
]
},
{
"added": [
" * See BTreeLockingPolicy.lockNonScanPreviousRow"
],
"header": "@@ -890,7 +942,7 @@ class B2IRowLocking3 implements BTreeLockingPolicy",
"removed": [
" * See BTree.lockPreviousRow() for more info."
]
}
]
}
] |
derby-DERBY-3249-fafbff83
|
Add JDBC.assertGeneratedKeyResultSet() to verify the type and concurrency of ResultSets returned by Statement.getGeneratedKeys. Add a commented-out test case in AutoGenJDBC30Test.java that demonstrates bug DERBY-3249.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@601044 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/JDBC.java",
"hunks": [
{
"added": [
" /**",
" * Assert that a ResultSet representing generated keys is non-null",
" * and of the correct type. This method leaves the ResultSet",
" * open and does not fetch any date from it.",
" * ",
" * @param description For assert messages",
" * @param keys ResultSet returned from getGeneratedKeys().",
" * @throws SQLException",
" */",
" public static void assertGeneratedKeyResultSet(",
" String description, ResultSet keys) throws SQLException",
" {",
" ",
" Assert.assertNotNull(description, keys);",
" ",
" // Requirements from section 13.6 JDBC 4 specification",
" Assert.assertEquals(",
" description + ",
" \" - Required CONCUR_READ_ONLY for generated key result sets\",",
" ResultSet.CONCUR_READ_ONLY, keys.getConcurrency());",
" ",
" int type = keys.getType();",
" if ( (type != ResultSet.TYPE_FORWARD_ONLY) &&",
" (type != ResultSet.TYPE_SCROLL_INSENSITIVE))",
" {",
" Assert.fail(description +",
" \" - Invalid type for generated key result set\" + type);",
" }",
" ",
" ",
"",
" }",
" ",
" "
],
"header": "@@ -510,6 +510,40 @@ public class JDBC {",
"removed": []
}
]
}
] |
derby-DERBY-3250-e621aee2
|
DERBY-3250: make NetworkServerTestSetup use Runtime.exec(String[]) rather than Runtime.exec(String) to prevent trouble with spaces in arguments.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@608140 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/testing/org/apache/derbyTesting/junit/NetworkServerTestSetup.java",
"hunks": [
{
"added": [
" ArrayList al = new ArrayList();",
" al.add( \"java\" );",
" al.add( \"-classpath\" );",
" al.add( classpath );",
" al.add( \"-D\" + systemProperties[ i ] );",
" al.add( \"org.apache.derby.drda.NetworkServerControl\" );"
],
"header": "@@ -184,22 +184,21 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" StringBuffer buffer = new StringBuffer();",
" buffer.append( \"java -classpath \" );",
" buffer.append( classpath );",
" buffer.append( \" \" );",
" buffer.append( \" -D\" );",
" buffer.append( systemProperties[ i ] );",
" buffer.append( \" org.apache.derby.drda.NetworkServerControl \" );"
]
},
{
"added": [
" al.add( defaultArgs[ i ] );",
" al.add( startupArgs[ i ] );",
" final String[] command = new String[ al.size() ];",
" al.toArray(command);",
" /* System.out.println( \"XXX server startup command = \");",
" for (int i = 0 ; i < command.length ; i++) {",
" System.out.print( command[i] + \" \" );",
" }",
" System.out.println();",
" */"
],
"header": "@@ -214,20 +213,24 @@ final public class NetworkServerTestSetup extends BaseTestSetup {",
"removed": [
" buffer.append( \" \" );",
" buffer.append( defaultArgs[ i ] );",
" buffer.append( \" \" );",
" buffer.append( startupArgs[ i ] );",
" final String command = buffer.toString();",
" //System.out.println( \"XXX server startup command = \" + command );"
]
}
]
}
] |
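Note on DERBY-3250 above: a minimal sketch of the exec(String[]) approach, assuming a java binary on the PATH; the classpath entry with a space is a hypothetical example of the kind of argument that exec(String) would have split on whitespace.

import java.util.ArrayList;

public class ExecArgsSketch {
    public static void main(String[] args) throws Exception {
        // Building the command as a String[] keeps an argument containing
        // spaces (here, a classpath entry) as a single argument.
        String classpath = "/opt/my derby/derby.jar"; // hypothetical path with a space
        ArrayList<String> al = new ArrayList<>();
        al.add("java");
        al.add("-classpath");
        al.add(classpath);
        al.add("-version");
        String[] command = al.toArray(new String[0]);
        Process p = Runtime.getRuntime().exec(command);
        System.out.println("exit=" + p.waitFor());
    }
}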
derby-DERBY-3253-32edb61b
|
DERBY-3253: Fix NPE for IN list operator when the probe predicate is
pushed into a subselect but then multi-probing does not occur.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@605616 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/BinaryRelationalOperatorNode.java",
"hunks": [
{
"added": [
"\t * back to the InListOperatorNode referenced here. NOTE: Once",
"\t * set, this variable should *only* ever be accessed via the",
"\t * isInListProbeNode() or getInListOp() methods--see comments",
"\t * in the latter method for more.",
"\tprivate InListOperatorNode inListProbeSource = null;"
],
"header": "@@ -80,9 +80,12 @@ public class BinaryRelationalOperatorNode",
"removed": [
"\t * back to the InListOperatorNode referenced here.",
"\tInListOperatorNode inListProbeSource = null;"
]
},
{
"added": [
"\t * IN-list). With the exception of nullability checking via the",
"\t * isInListProbeNode() method, all access to this.inListProbeSource",
"\t * MUST come through this method, as this method ensures that the",
"\t * left operand of the inListProbeSource is set correctly before",
"\t * returning it.",
"\t\tif (inListProbeSource != null)",
"\t\t{",
"\t\t\t/* Depending on where this probe predicate currently sits",
"\t\t\t * in the query tree, this.leftOperand *may* have been",
"\t\t\t * transformed, replaced, or remapped one or more times",
"\t\t\t * since inListProbeSource was last referenced. Since the",
"\t\t\t * leftOperand of the IN list should be the same regardless",
"\t\t\t * of which \"version\" of the operation we're looking at",
"\t\t\t * (i.e. the \"probe predicate\" version (this node) vs the",
"\t\t\t * original version (inListProbeSource)), we have to make",
"\t\t\t * sure that all of the changes made to this.leftOperand",
"\t\t\t * are reflected in inListProbeSource's leftOperand, as",
"\t\t\t * well. In doing so we ensure the caller of this method",
"\t\t\t * will see an up-to-date version of the InListOperatorNode--",
"\t\t\t * and thus, if the caller references the InListOperatorNode's",
"\t\t\t * leftOperand, it will see the correct information. One",
"\t\t\t * notable example of this is at code generation time, where",
"\t\t\t * if this probe predicate is deemed \"not useful\", we'll",
"\t\t\t * generate the underlying InListOperatorNode instead of",
"\t\t\t * \"this\". For that to work correctly, the InListOperatorNode",
"\t\t\t * must have the correct leftOperand. DERBY-3253.",
"\t\t\t *",
"\t\t\t * That said, since this.leftOperand will always be \"up-to-",
"\t\t\t * date\" w.r.t. the current query tree (because this probe",
"\t\t\t * predicate sits in the query tree and so all relevant",
"\t\t\t * transformations will be applied here), the simplest way",
"\t\t\t * to ensure the underlying InListOperatorNode also has an",
"\t\t\t * up-to-date leftOperand is to set it to this.leftOperand.",
"\t\t\t */",
"\t\t\tinListProbeSource.setLeftOperand(this.leftOperand);",
"\t\t}",
""
],
"header": "@@ -153,10 +156,46 @@ public class BinaryRelationalOperatorNode",
"removed": [
"\t * IN-list)."
]
},
{
"added": [
"\t\tif (isInListProbeNode())"
],
"header": "@@ -777,7 +816,7 @@ public class BinaryRelationalOperatorNode",
"removed": [
"\t\tif (inListProbeSource != null)"
]
},
{
"added": [
"\t\treturn !isInListProbeNode();"
],
"header": "@@ -1215,7 +1254,7 @@ public class BinaryRelationalOperatorNode",
"removed": [
"\t\treturn (inListProbeSource == null);"
]
},
{
"added": [
"\t\treturn !isInListProbeNode() &&",
"\t/**",
"\t * @see ValueNode#isInListProbeNode",
"\t *",
"\t * It's okay for this method to reference inListProbeSource directly",
"\t * because it does not rely on the contents of inListProbeSource's",
"\t * leftOperand, and a caller of this method cannot gain access to",
"\t * inListProbeSource's leftOperand through this method.",
"\t */"
],
"header": "@@ -1225,11 +1264,18 @@ public class BinaryRelationalOperatorNode",
"removed": [
"\t\treturn (inListProbeSource == null) &&",
"\t/** @see ValueNode#isInListProbeNode */"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/OrNode.java",
"hunks": [
{
"added": [
"\t\t\t\t\t\t\tif (!bron.isInListProbeNode())"
],
"header": "@@ -152,7 +152,7 @@ public class OrNode extends BinaryLogicalOperatorNode",
"removed": [
"\t\t\t\t\t\t\tif (bron.getInListOp() == null)"
]
}
]
}
] |
derby-DERBY-3257-2e556fe3
|
DERBY-3257 SELECT with HAVING clause containing an OR conditional incorrectly returns 1 row - should return 2 rows - works correctly with a 10.2 DB
Normalize the havingClause before calling preprocess. This made it necessary to explicitly exclude having subqueries from flattening; previously they were implicitly excluded because the clause was not normalized. Army Brown contributed the main part of the fix to normalize the having clause.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@614017 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/SelectNode.java",
"hunks": [
{
"added": [
"\t\twhereClause = normExpressions(whereClause);",
"\t\t// DERBY-3257. We need to normalize the having clause as well, because ",
"\t\t// preProcess expects CNF.",
"\t\thavingClause = normExpressions(havingClause);",
"\t\t"
],
"header": "@@ -816,8 +816,11 @@ public class SelectNode extends ResultSetNode",
"removed": [
"\t\tnormExpressions();",
""
]
},
{
"added": [
"\t\t // DERBY-3257 ",
"\t\t // Mark subqueries that are part of the having clause as ",
"\t\t // such so we can avoid flattenning later. Having subqueries",
"\t\t // cannot be flattened because we cannot currently handle",
"\t\t // column references at the same source level.",
"\t\t // DERBY-3257 required we normalize the having clause which",
"\t\t // triggered flattening because SubqueryNode.underTopAndNode",
"\t\t // became true after normalization. We needed another way to",
"\t\t // turn flattening off. Perhaps the long term solution is",
"\t\t // to avoid this restriction all together but that was beyond",
"\t\t // the scope of this bugfix.",
"\t\t havingSubquerys.markHavingSubqueries();",
"\t\t havingClause = havingClause.preprocess("
],
"header": "@@ -880,7 +883,19 @@ public class SelectNode extends ResultSetNode",
"removed": [
"\t\t\thavingClause = havingClause.preprocess("
]
},
{
"added": [
" * @param boolClause clause to normalize",
" * ",
"\tprivate ValueNode normExpressions(ValueNode boolClause)"
],
"header": "@@ -1068,9 +1083,11 @@ public class SelectNode extends ResultSetNode",
"removed": [
"\tprivate void normExpressions()"
]
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/SubqueryNode.java",
"hunks": [
{
"added": [
" /**",
" * is this subquery part of a having clause. We need to know this so ",
" * we can avoid flattening.",
" */",
" boolean havingSubquery = false;",
" "
],
"header": "@@ -120,6 +120,12 @@ public class SubqueryNode extends ValueNode",
"removed": []
},
{
"added": [
" * o It is not a subquery in a having clause (DERBY-3257)",
"\t\t\t\t\t underTopAndNode && !havingSubquery &&"
],
"header": "@@ -605,11 +611,12 @@ public class SubqueryNode extends ValueNode",
"removed": [
"\t\t\t\t\t underTopAndNode &&"
]
},
{
"added": [
" * o The subquery is not part of a having clause (DERBY-3257)"
],
"header": "@@ -666,6 +673,7 @@ public class SubqueryNode extends ValueNode",
"removed": []
},
{
"added": [
"\t\t\t\t\t underTopAndNode && !havingSubquery &&"
],
"header": "@@ -673,7 +681,7 @@ public class SubqueryNode extends ValueNode",
"removed": [
"\t\t\t\t\t underTopAndNode &&"
]
}
]
}
] |
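Note on DERBY-3257 above: a minimal JDBC reproduction sketch of the failing query shape, assuming derby.jar on the classpath and an in-memory database URL (available in later releases); both groups should be returned once the HAVING clause is normalized to CNF.

import java.sql.*;

public class HavingOrSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection c = DriverManager.getConnection("jdbc:derby:memory:d;create=true");
             Statement s = c.createStatement()) {
            s.execute("CREATE TABLE t(a INT, b INT)");
            s.execute("INSERT INTO t VALUES (1, 10), (1, 20), (2, 30)");
            ResultSet rs = s.executeQuery(
                "SELECT a, COUNT(*) FROM t GROUP BY a " +
                "HAVING COUNT(*) = 2 OR a = 2");   // expects both groups
            while (rs.next()) {
                System.out.println(rs.getInt(1) + " -> " + rs.getInt(2));
            }
        }
    }
}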
derby-DERBY-3258-dd32c163
|
DERBY-3258: 'Unexpected row count: expected:<0> but was:<3>' in testReleaseCompileLocks
Wait for post-commit work to complete before checking the contents of
the lock table.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1041338 13f79535-47bb-0310-9956-ffa450edef68
|
[] |
derby-DERBY-326-f3ab04f3
|
- DERBY-326 Improve streaming of large objects for network server and client - Patch by Tomohito Nakayama (tomonaka@basil.ocn.ne.jp)
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@405037 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/drda/org/apache/derby/impl/drda/DDMWriter.java",
"hunks": [
{
"added": [
"import java.io.InputStream;",
"import java.io.BufferedInputStream;",
"import java.io.BufferedOutputStream;",
"import org.apache.derby.iapi.reference.Property;",
"import org.apache.derby.iapi.services.property.PropertyUtil;",
"",
"import java.io.IOException;"
],
"header": "@@ -21,12 +21,19 @@",
"removed": []
},
{
"added": [
" ",
" protected void writeScalarStream (boolean chainedWithSameCorrelator,",
"\t\t\t\t EXTDTAInputStream in,",
"\t ",
"",
"\t int spareDssLength = prepScalarStream( chainedWithSameCorrelator,",
"\t\t\t\t\t\t\t\t\t\t\twriteNullByte);",
"\t ",
"",
"\t\t\t\t ",
"\t\tOutputStream out = ",
"\t\t placeLayerBStreamingBuffer( agent.getOutputStream() );",
"\t\t",
"\t\tboolean isLastSegment = false;",
"\t\t",
"\t\twhile( !isLastSegment ){",
"\t\t ",
"\t\t int spareBufferLength = bytes.length - offset;",
"\t\t ",
"\t\t if( SanityManager.DEBUG ){",
"\t\t",
"\t\t\tif( PropertyUtil.getSystemProperty(\"derby.debug.suicideOfLayerBStreaming\") != null )",
"\t\t\t throw new IOException();",
"\t\t ",
"\t\t bytesRead = in.read(bytes,",
"\t\t\t\t\toffset,",
"\t\t\t\t\tMath.min(spareDssLength,",
"\t\t\t\t\t\t spareBufferLength));",
"\t\t ",
"\t\t\t\t\ttotalBytesRead += bytesRead;",
"\t\t spareDssLength -= bytesRead;",
"\t\t spareBufferLength -= bytesRead;",
"",
"\t\t isLastSegment = peekStream(in) < 0;",
"\t\t ",
"\t\t if(isLastSegment || ",
"\t\t spareDssLength == 0){",
"\t\t\t",
"\t\t\tflushScalarStreamSegment (isLastSegment, ",
"\t\t\t\t\t\t out);",
"\t\t\t",
"\t\t\tif( ! isLastSegment )",
"\t\t\t spareDssLength = DssConstants.MAX_DSS_LENGTH - 2;",
"\t\t ",
"\t\t",
"\t\tout.flush();",
"\t\t",
"\t }catch(IOException e){",
"\t\tagent.markCommunicationsFailure (\"DDMWriter.writeScalarStream()\",",
"\t\t\t\t\t\t \"\",",
"\t\t\t\t\t\t e.getMessage(),",
"\t\t\t\t\t\t \"*\");",
"\t\t\t\t"
],
"header": "@@ -662,67 +669,76 @@ class DDMWriter",
"removed": [
"\t// TODO: Rewrite writeScalarStream to avoid passing a length.",
"\t// The length is never written and not required by the DRDA spec.",
"\t// Also looks like on IOException we just pad out the stream instead",
"\t// of actually sending an exception. Similar code is in client, so ",
"\t// should be fixed in both places.",
"\tprotected int writeScalarStream (boolean chainedWithSameCorrelator,",
"\t\t\t\t\t\t\t\t\t int length,",
"\t\t\t\t\t\t\t\t\t java.io.InputStream in,",
"\t\tint leftToRead = length;",
"\t\tint bytesToRead = prepScalarStream (chainedWithSameCorrelator,",
"\t\t\t\t\t\t\t\t\t\t\twriteNullByte,",
"\t\t\t\t\t\t\t\t\t\t\tleftToRead);",
"",
"\t\tif (length == 0)",
"\t\t\treturn 0;",
"",
"\t\tdo {",
"\t\t\tdo {",
"\t\t\t\t\tbytesRead = in.read (bytes, offset, bytesToRead);",
"\t\t\t\t\ttotalBytesRead += bytesRead;",
"\t\t\t\t}",
"\t\t\t\tcatch (java.io.IOException e) {",
"\t\t\t\t\tpadScalarStreamForError (leftToRead, bytesToRead);",
"\t\t\t\t\treturn totalBytesRead;",
"\t\t\t\t}",
"\t\t\t\tif (bytesRead == -1) {",
"\t\t\t\t\tpadScalarStreamForError (leftToRead, bytesToRead);",
"\t\t\t\t\treturn totalBytesRead;",
"\t\t\t\telse {",
"\t\t\t\t\tbytesToRead -= bytesRead;",
"\t\t\t\t\tleftToRead -= bytesRead;",
"\t\t\t\t}",
"\t\t\t} while (bytesToRead > 0);",
"\t\t\tbytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead);",
"\t\t} while (leftToRead > 0);",
"\t\t// check to make sure that the specified length wasn't too small",
"\t\ttry {",
"\t\t\tif (in.read() != -1) {",
"\t\t\t\ttotalBytesRead += 1;",
"\t\tcatch (java.io.IOException e) {",
"\t\t\t// Encountered error in stream length verification for ",
"\t\t\t// InputStream, parameter #\" + parameterIndex + \". ",
"\t\t\t// Don't think we need to error for this condition",
"\t\treturn totalBytesRead;"
]
},
{
"added": [
" /**",
" * prepScalarStream does the following prep for writing stream data:",
" * 1. Flushes an existing DSS segment, if necessary",
" * 2. Determines if extended length bytes are needed",
" * 3. Creates a new DSS/DDM header and a null byte indicator, if applicable",
" *",
" * If value of length was less than 0, this method processes streaming as Layer B Streaming.",
" * cf. page 315 of specification of DRDA, Version 3, Volume 3 ",
" *",
" */",
" private int prepScalarStream( boolean chainedWithSameCorrelator,",
" boolean writeNullByte) throws DRDAProtocolException",
" ensureLength( DEFAULT_BUFFER_SIZE - offset );",
" ",
" final int nullIndicatorSize = writeNullByte ? 1:0;",
"",
" ",
" // flush the existing DSS segment ,",
" // if this stream will not fit in the send buffer or ",
" // length of this stream is unknown.",
" // Here, 10 stands for sum of headers of layer A and B."
],
"header": "@@ -749,24 +765,31 @@ class DDMWriter",
"removed": [
" // prepScalarStream does the following prep for writing stream data:",
" // 1. Flushes an existing DSS segment, if necessary",
" // 2. Determines if extended length bytes are needed",
" // 3. Creates a new DSS/DDM header and a null byte indicator, if applicable",
" protected int prepScalarStream (boolean chainedWithSameCorrelator,",
" boolean writeNullByte,",
" int leftToRead) throws DRDAProtocolException",
" int extendedLengthByteCount;",
" int nullIndicatorSize = 0;",
" if (writeNullByte) ",
"\t\tnullIndicatorSize = 1;",
"\textendedLengthByteCount = calculateExtendedLengthByteCount (leftToRead + 4 + nullIndicatorSize);",
" // flush the existing DSS segment if this stream will not fit in the send buffer",
" if (10 + extendedLengthByteCount + nullIndicatorSize + leftToRead + offset > DssConstants.MAX_DSS_LENGTH) {"
]
},
{
"added": [
" writeLengthCodePoint(0x8004,codePoint);",
" //Here, 6 stands for header of layer A and ",
" //4 stands for header of layer B.",
" return DssConstants.MAX_DSS_LENGTH - 6 - 4 - nullIndicatorSize;"
],
"header": "@@ -777,39 +800,22 @@ class DDMWriter",
"removed": [
" }",
" if (extendedLengthByteCount > 0) {",
" // method should never ensure length",
" writeLengthCodePoint (0x8004 + extendedLengthByteCount, codePoint);",
" if (writeNullByte)",
" writeExtendedLengthBytes (extendedLengthByteCount, leftToRead + 1);",
" else",
" writeExtendedLengthBytes (extendedLengthByteCount, leftToRead);",
" }",
" else {",
" if (writeNullByte)",
" writeLengthCodePoint (leftToRead + 4 + 1, codePoint);",
" else",
" writeLengthCodePoint (leftToRead + 4, codePoint);",
" }",
" int bytesToRead;",
" if (writeNullByte)",
" bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - 1 - extendedLengthByteCount);",
" else",
" bytesToRead = Math.min (leftToRead, DssConstants.MAX_DSS_LENGTH - 6 - 4 - extendedLengthByteCount);",
" return bytesToRead;"
]
},
{
"added": [
"\tprivate void flushScalarStreamSegment ( boolean lastSegment,",
"\t\t\t\t\t OutputStream out)",
"\t if (! lastSegment) {",
"\t\t// 32k segment filled and not at end of data.",
"\t\t\t\t\tsendBytes (out,",
"\t\t\t\t\t\t false);",
"\t\t\t ",
"\t\t\t}catch (java.io.IOException ioe) {",
"",
"\t }else{"
],
"header": "@@ -823,69 +829,44 @@ class DDMWriter",
"removed": [
"\tprotected int flushScalarStreamSegment (int leftToRead,",
"\t\t\t\t\t\t\t\t\t\t\tint bytesToRead)",
"\t\tint newBytesToRead = bytesToRead;",
"\t\tif (leftToRead != 0) {",
"\t\t// 32k segment filled and not at end of data.",
"\t\t\tif ((Math.min (2 + leftToRead, 32767)) > (bytes.length - offset)) {",
"\t\t\t\t\tsendBytes (agent.getOutputStream());",
"\t\t\t\t}",
"\t\t\t\tcatch (java.io.IOException ioe) {",
"\t\t\t}",
"\t\t\telse {",
"\t\t\t// DSS is full, but we still have space in the buffer. So",
"\t\t\t// end the DSS, then start the next DSS right after it.",
"\t\t\t\tendDss(false);\t\t// false => don't finalize length.",
"\t\t\t}",
"\t\t\tnewBytesToRead = Math.min (leftToRead,32765);",
" \t\t}",
"\t\telse {",
"\t\t}",
"",
"\t\treturn newBytesToRead;",
" // the offset must not be updated when an error is encountered",
" // note valid data may be overwritten",
" protected void padScalarStreamForError (int leftToRead, int bytesToRead) throws DRDAProtocolException",
" {",
" do {",
" do {",
" bytes[offset++] = (byte)(0x0); // use 0x0 as the padding byte",
" bytesToRead--;",
" leftToRead--;",
" } while (bytesToRead > 0);",
"",
" bytesToRead = flushScalarStreamSegment (leftToRead, bytesToRead);",
" } while(leftToRead > 0);",
""
]
},
{
"added": [
" ",
" private void sendBytes (java.io.OutputStream socketOutputStream) ",
"\tthrows java.io.IOException{",
"\t",
"\tsendBytes(socketOutputStream,",
"\t\t true);",
"\t",
" }",
" ",
" private void sendBytes (java.io.OutputStream socketOutputStream,",
"\t\t\t boolean flashStream ) ",
" throws java.io.IOException",
" if(flashStream)",
"\t socketOutputStream.flush();"
],
"header": "@@ -1787,14 +1768,25 @@ class DDMWriter",
"removed": [
"",
" private void sendBytes (java.io.OutputStream socketOutputStream) throws java.io.IOException",
" socketOutputStream.flush();"
]
}
]
},
{
"file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java",
"hunks": [
{
"added": [
"import java.io.IOException;"
],
"header": "@@ -28,6 +28,7 @@ import java.io.ByteArrayInputStream;",
"removed": []
},
{
"added": [
"\t\t\t\t ",
"\t\t\t\t if( ! ((EXTDTAInputStream) val).isEmptyStream() ){",
"\t\t\t\t ",
"\t\t\t\t\t//indicate externalized and size is unknown.",
"\t\t\t\t\twriter.writeExtendedLength(0x8000);",
"\t\t\t\t\t",
"\t\t\t\t }else{",
"\t\t\t\t\twriter.writeExtendedLength(0);",
"\t\t\t\t\t",
"\t\t\t\t }",
"\t\t\t\t ",
"\t\t\t\t "
],
"header": "@@ -6958,12 +6959,21 @@ class DRDAConnThread extends Thread {",
"removed": [
"\t\t\t\t\tvalLength = ((EXTDTAInputStream) val).length();",
"\t\t\t\t\tif (valLength > 0)",
"\t\t\t\t\twriter.writeExtendedLength (valLength);"
]
},
{
"added": [
"\t\t\ttry{",
"\t\t\t}finally{",
"\t\t\t closeStream(stream);",
"\t\t\t"
],
"header": "@@ -7539,29 +7549,17 @@ class DRDAConnThread extends Thread {",
"removed": [
"\t\t\tlong lobLength = stream.length();",
"\t\t\t\t\t\t\t\t\t (int) Math.min(lobLength,",
"\t\t\t\t\t\t\t\t\t\t\t\t\t Integer.MAX_VALUE),",
"\t\t\ttry {",
"\t\t\t\tif (stream != null)",
"\t\t\t\t\tstream.close();",
"\t\t\t} catch (IOException e) {",
"\t\t\t\tUtil.javaException(e);",
"\t\t\t}",
"\t\telse if (o instanceof byte[]) {",
"\t\t\tbyte[] b = (byte []) o;",
"\t\t\twriter.writeScalarStream (chainedWithSameCorrelator,",
"\t\t\t\t\t\t\t\t\t CodePoint.EXTDTA,",
"\t\t\t\t\t\t\t\t\t (int) b.length,",
"\t\t\t\t\t\t\t\t\t new ByteArrayInputStream(b),",
"\t\t\t\t\t\t\t\t\t writeNullByte);"
]
}
]
},
{
"file": "java/drda/org/apache/derby/impl/drda/EXTDTAInputStream.java",
"hunks": [
{
"added": [
"import java.io.BufferedInputStream;",
"import java.sql.Blob;",
"import java.sql.Clob;",
"import java.io.UnsupportedEncodingException;",
""
],
"header": "@@ -22,9 +22,14 @@ package org.apache.derby.impl.drda;",
"removed": []
},
{
"added": [
" private InputStream binaryInputStream = null;",
" private boolean isEmptyStream;",
" private ResultSet dataResultSet = null;",
" private Blob blob = null;",
" private Clob clob = null;",
"\tprivate EXTDTAInputStream(ResultSet rs,",
"\t\t\t\t int columnNumber,",
"\t\t\t\t int ndrdaType) ",
"\t throws SQLException, IOException",
" {",
"\t",
"\t this.dataResultSet = rs;",
"\t this.isEmptyStream = ! initInputStream(rs,",
"\t\t\t\t\t\t columnNumber,",
"\t\t\t\t\t\t ndrdaType);",
" ",
" ",
"\t * input stream for the large object being retrieved. Do not hold"
],
"header": "@@ -42,28 +47,35 @@ import org.apache.derby.impl.jdbc.Util;",
"removed": [
"\tlong dataLength = 0; // length of the stream;",
"",
"\tInputStream binaryInputStream = null;",
"\tint columnNumber;",
"",
"\tResultSet dataResultSet = null;",
"\t * @param dataLength",
"\tprivate EXTDTAInputStream( int dataLength, InputStream binaryInputStream) {",
"\t\tthis.dataLength = dataLength;",
"\t\tthis.binaryInputStream = binaryInputStream;",
"\t * input stream and length for the large object being retrieved. Do not hold"
]
},
{
"added": [
" \t try{",
"\t\treturn new EXTDTAInputStream(rs,",
"\t\t\t\t\t column,",
"\t\t\t\t\t ndrdaType);",
" \t }catch(IOException e){",
" \t\tthrow new SQLException(e.getMessage());"
],
"header": "@@ -87,52 +99,17 @@ class EXTDTAInputStream extends InputStream {",
"removed": [
"\t\t",
"\t\tEXTDTAInputStream extdtaStream = null;",
"\t\tint length = 0;",
"\t\tbyte[] bytes = null;",
"\t\t",
"\t\t// BLOBS",
"\t\tif (ndrdaType == DRDAConstants.DRDA_TYPE_NLOBBYTES) ",
"\t\t{",
"\t\t\t//TODO: Change to just use rs.getBinaryStream() by ",
"\t\t\t// eliminating the need for a length parameter in",
"\t\t\t//DDMWriter.writeScalarStream and therefore eliminating the need for dataLength in this class",
"\t\t\tbytes = rs.getBytes(column);",
"\t\t}",
"\t\t// CLOBS",
"\t\telse if (ndrdaType == DRDAConstants.DRDA_TYPE_NLOBCMIXED)",
"\t\t{\t",
"\t\t\t//TODO: Change to use getCharacterStream and change the read method",
"\t\t\t// to stream the data after length is no longer needed in DDMWRiter.writeScalarStream",
"\t\t\tString s = rs.getString(column);",
"\t\t\ttry {",
"\t\t\t\tif (s != null)",
"\t\t\t\t\tbytes = s.getBytes(NetworkServerControlImpl.DEFAULT_ENCODING);",
"\t\t\t}",
"\t\t\tcatch (java.io.UnsupportedEncodingException e) {",
"\t\t\t\tthrow new SQLException (e.getMessage());",
"\t\t\t}",
"\t\t}",
"\t\telse",
"\t\t{",
"\t\t\tif (SanityManager.DEBUG)",
"\t\t\t{",
"\t\t\tSanityManager.THROWASSERT(\"DRDAType: \" + drdaType +",
"\t\t\t\t\t\t\" not valid EXTDTA object type\");",
"\t\t\t}",
"\t\t}",
"\t\tif (bytes != null)",
"\t\t{",
"\t\t\tlength = bytes.length;",
"\t\t\tInputStream is = new ByteArrayInputStream(bytes);",
"\t\t\textdtaStream = new EXTDTAInputStream(length, is);",
"\t\treturn extdtaStream;"
]
},
{
"added": [],
"header": "@@ -170,18 +147,6 @@ class EXTDTAInputStream extends InputStream {",
"removed": [
"\t",
"\t/**",
"\t * Return the length of the binary stream which was calculated when",
"\t * EXTDTAObject was created.",
"\t * ",
"\t * @return the length of the stream once converted to an InputStream",
"\t */",
"\tpublic long length() throws SQLException {",
"\t\treturn dataLength;",
"\t\t",
"\t}",
""
]
},
{
"added": [
"\t ",
"\t try{",
"\t\tbinaryInputStream = null;",
"",
"\t }finally{",
"\t\t",
"\t\tblob = null;",
"\t\tclob = null;",
"\t\tdataResultSet = null;",
"\t }",
"\t "
],
"header": "@@ -206,8 +171,19 @@ class EXTDTAInputStream extends InputStream {",
"removed": []
}
]
}
] |
derby-DERBY-3260-387d1fb4
|
DERBY-3260: NullPointerException caused by race condition in GenericActivationHolder
Hold the synchronization lock on the GenericPreparedStatement until
the activation class has been retrieved. This prevents other threads
from setting the activation class to null before we have retrieved it,
and thereby prevents the NPE.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@613815 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/GenericActivationHolder.java",
"hunks": [
{
"added": [
" GeneratedClass newGC;",
"",
" // DERBY-3260: If someone else reprepares the statement at the",
" // same time as we do, there's a window between the calls to",
" // rePrepare() and getActivationClass() when the activation",
" // class can be set to null, leading to NullPointerException",
" // being thrown later. Therefore, synchronize on ps to close",
" // the window.",
" synchronized (ps) {",
" ps.rePrepare(getLanguageConnectionContext());",
" newGC = ps.getActivationClass();",
" }",
""
],
"header": "@@ -255,9 +255,20 @@ final class GenericActivationHolder implements Activation",
"removed": [
"\t\t\t\tps.rePrepare(getLanguageConnectionContext());",
"\t\t\t\t"
]
},
{
"added": [],
"header": "@@ -265,8 +276,6 @@ final class GenericActivationHolder implements Activation",
"removed": [
"\t\t\t\tGeneratedClass\t\tnewGC = ps.getActivationClass();",
""
]
}
]
}
] |
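Note on DERBY-3260 above: a self-contained sketch of the synchronization fix, with a stub standing in for GenericPreparedStatement; holding the monitor across both calls closes the window in which a concurrent invalidation could null out the activation class.

public class RepreparationSketch {
    static class PreparedStatementStub {
        private Object activationClass;          // null while invalid

        synchronized void rePrepare() {
            activationClass = new Object();      // regenerate
        }

        synchronized Object getActivationClass() {
            return activationClass;
        }

        synchronized void invalidate() {         // concurrent DDL path
            activationClass = null;
        }
    }

    static Object safeFetch(PreparedStatementStub ps) {
        // Without this block, invalidate() can run between rePrepare()
        // and getActivationClass(), so null would be returned.
        synchronized (ps) {
            ps.rePrepare();
            return ps.getActivationClass();
        }
    }

    public static void main(String[] args) {
        System.out.println(safeFetch(new PreparedStatementStub()) != null);
    }
}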
derby-DERBY-3266-58d069af
|
DERBY-3266 Not possible for non-db-owner to create a temporary table
Patch derby-3266-2. With this patch, a non-dbo user can use temporary
tables. A new testcase was added to GrantRevokeDDLTest.
git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@704762 13f79535-47bb-0310-9956-ffa450edef68
|
[
{
"file": "java/engine/org/apache/derby/impl/sql/compile/CompilerContextImpl.java",
"hunks": [
{
"added": [
"",
"\t\tif (td.getTableType() ==",
"\t\t\t\tTableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE) {",
"\t\t\treturn; // no priv needed, it is per session anyway",
"\t\t}",
""
],
"header": "@@ -742,6 +742,12 @@ public class CompilerContextImpl extends ContextImpl",
"removed": []
}
]
},
{
"file": "java/engine/org/apache/derby/impl/sql/compile/CreateTableNode.java",
"hunks": [
{
"added": [
"\t\t\tSchemaDescriptor sd = getSchemaDescriptor(",
"\t\t\t\ttableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE,",
"\t\t\t\ttrue);",
""
],
"header": "@@ -290,7 +290,10 @@ public class CreateTableNode extends DDLStatementNode",
"removed": [
"\t\t\tSchemaDescriptor sd = getSchemaDescriptor();"
]
},
{
"added": [
"\t\t\t\tgetSchemaDescriptor(",
"\t\t\t\t\ttableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE,",
"\t\t\t\t\ttrue));"
],
"header": "@@ -365,7 +368,9 @@ public class CreateTableNode extends DDLStatementNode",
"removed": [
"\t\t\t\t\tgetSchemaDescriptor());"
]
},
{
"added": [
"\t\treturn isSessionSchema(",
"\t\t\tgetSchemaDescriptor(",
"\t\t\t\ttableType != TableDescriptor.GLOBAL_TEMPORARY_TABLE_TYPE,",
"\t\t\t\ttrue));"
],
"header": "@@ -460,7 +465,10 @@ public class CreateTableNode extends DDLStatementNode",
"removed": [
"\t\treturn isSessionSchema(getSchemaDescriptor());"
]
}
]
}
] |