id: string, length 22 to 25
commit_message: string, length 137 to 6.96k
diffs: list, length 0 to 63
derby-DERBY-2435-c9c3c8e8
DERBY-2435: Commit derby-2435-01.diff, making NetworkServerControl pick up properties from derby.properties as needed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@519004 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/drda/NetworkServerControl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.property.PropertyUtil;" ], "header": "@@ -26,6 +26,7 @@ import java.net.InetAddress;", "removed": [] }, { "added": [ " String authenticationProperty =", " PropertyUtil.getSystemProperty( Property.REQUIRE_AUTHENTICATION_PARAMETER );", " boolean authenticationRequired = Boolean.valueOf( authenticationProperty ).booleanValue();", " ", " if ( !authenticationRequired )" ], "header": "@@ -561,7 +562,11 @@ public class NetworkServerControl{", "removed": [ " if ( !Boolean.getBoolean( Property.REQUIRE_AUTHENTICATION_PARAMETER ) )" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/SupportFilesSetup.java", "hunks": [ { "added": [ " private String[] readOnlyTargetFileNames;", " private String[] readWriteTargetFileNames;" ], "header": "@@ -64,6 +64,8 @@ public class SupportFilesSetup extends TestSetup {", "removed": [] }, { "added": [ " this(test, readOnly, (String[]) null, (String[]) null, (String[]) null);" ], "header": "@@ -79,7 +81,7 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " this(test, readOnly, (String[]) null);" ] }, { "added": [ " {", " this(test, readOnly, readWrite, (String[]) null, (String[]) null);", " }", " ", " /**", " * Create all the folders, copy a set of resources into", " * the read only folder and copy a set of resources into", " * the read write folder. If specified, use the specific target file", " * supplied by the caller.", " */", " public SupportFilesSetup", " (Test test, String[] readOnly, String[] readWrite, String[] readOnlyTargetFileNames, String[] readWriteTargetFileNames)", " this.readOnlyTargetFileNames = readOnlyTargetFileNames;", " this.readWriteTargetFileNames = readWriteTargetFileNames;", " privCopyFiles(\"extin\", readOnly, readOnlyTargetFileNames);", " privCopyFiles(\"extinout\", readWrite, readWriteTargetFileNames);", " privCopyFiles(\"extout\", (String[]) null, (String[]) null);" ], "header": "@@ -88,17 +90,31 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " privCopyFiles(\"extin\", readOnly);", " privCopyFiles(\"extinout\", readWrite);", " privCopyFiles(\"extout\", (String[]) null);" ] }, { "added": [ " private void privCopyFiles(final String dirName, final String[] resources, final String[] targetNames)", " copyFiles(dirName, resources, targetNames);", " private void copyFiles(String dirName, String[] resources, String[] targetNames)" ], "header": "@@ -108,21 +124,21 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " private void privCopyFiles(final String dirName, final String[] resources)", " copyFiles(dirName, resources);", " private void copyFiles(String dirName, String[] resources)" ] }, { "added": [ " String baseName;", "", " if ( targetNames == null )", " {", " // by default, just the same file name as the source file", " baseName = name.substring(name.lastIndexOf('/') + 1);", " }", " else", " {", " // we let the caller override the target file name", " baseName = targetNames[ i ];", " }", " URL url = BaseTestCase.getTestResource(name);" ], "header": "@@ -136,10 +152,20 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " String baseName = name.substring(name.lastIndexOf('/') + 1);", " ", " URL url = BaseTestCase.getTestResource(name);" ] } ] } ]
derby-DERBY-244-958e2569
DERBY-244: With Linux, depending on the env setting $LANG and the console encoding, some i18n tests fail * Run the i18n tests with -Dfile.encoding=UTF-8 and -Dconsole.encoding=UTF-8 * Use UTF-8 encoding in ProcessStreamResult for the i18n tests * Make Sed.java read result files from i18n tests using UTF-8 encoding * Respect the derby.ui.codeset setting in i18n tests, and use UTF-8 if it is not specified. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@432645 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/functionTests/harness/Sed.java", "hunks": [ { "added": [ " if (is == null && isI18N) {", " // read UTF-8 encoded file", " InputStream fs = new FileInputStream(srcFile);", " inFile = new BufferedReader(new InputStreamReader(fs, \"UTF-8\"));", " } else if (is == null) {", " // read the file using the default encoding", " } else {", " }" ], "header": "@@ -372,10 +372,16 @@ public class Sed", "removed": [ " if (is == null)", " else" ] } ] } ]
derby-DERBY-2440-d2e9303e
DERBY-2440 DerbyNetNewServer should specify port when it starts a server to avoid requiring 1527 to run Committed patch d2440.diff. Contributed by Julius Stroffek (julius.stroffek@sun.com). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@532498 13f79535-47bb-0310-9956-ffa450edef68
[]
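The record above carries no diff hunks, but the message describes the practice being enforced: start the network server on an explicitly chosen port rather than assuming the default 1527. A sketch of that using the public NetworkServerControl API (port 1528 and the localhost address are arbitrary choices for illustration):

    import java.io.PrintWriter;
    import java.net.InetAddress;
    import org.apache.derby.drda.NetworkServerControl;

    public class ExplicitPortSketch {
        public static void main(String[] args) throws Exception {
            // bind the server to a non-default port so tests do not collide on 1527
            NetworkServerControl server =
                    new NetworkServerControl(InetAddress.getByName("localhost"), 1528);
            server.start(new PrintWriter(System.out, true));
            // ... run work against port 1528 ...
            server.shutdown();
        }
    }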
derby-DERBY-2442-41943bbb
DERBY-2442: Remove code made redundant by DERBY 681. Patch contributed by Manish Khettry. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@519936 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromSubquery.java", "hunks": [ { "added": [], "header": "@@ -21,12 +21,8 @@", "removed": [ "import org.apache.derby.iapi.reference.SQLState;", "", "import org.apache.derby.iapi.services.context.ContextManager;", "import org.apache.derby.iapi.sql.compile.CompilerContext;" ] }, { "added": [], "header": "@@ -34,7 +30,6 @@ import org.apache.derby.iapi.services.sanity.SanityManager;", "removed": [ "import java.util.Properties;" ] }, { "added": [], "header": "@@ -48,8 +43,6 @@ import java.util.Properties;", "removed": [ "\tboolean\t\t\tgeneratedForGroupByClause;", "\tboolean\t\t\tgeneratedForHavingClause;" ] }, { "added": [ "\t\t\treturn super.toString();" ], "header": "@@ -82,10 +75,7 @@ public class FromSubquery extends FromTable", "removed": [ "\t\t\treturn", "\t\t\t \"generatedForGroupByClause: \" + generatedForGroupByClause + \"\\n\" +", "\t\t\t \"generatedForHavingClause: \" + generatedForHavingClause + \"\\n\" +", "\t\t\t super.toString();" ] }, { "added": [], "header": "@@ -123,29 +113,6 @@ public class FromSubquery extends FromTable", "removed": [ "\t/**", "\t * Mark this FromSubquery as being generated for a GROUP BY clause.", "\t * (This node represents the SELECT thru GROUP BY clauses. We", "\t * appear in the FromList of a SelectNode generated to represent", "\t * the result of the GROUP BY. This allows us to add ResultColumns", "\t * to the SelectNode for the user's query.", "\t */", "\tpublic void markAsForGroupByClause()", "\t{", "\t\tgeneratedForGroupByClause = true;", "\t}", "", "\t/**", "\t * Mark this FromSubquery as being generated for a HAVING clause.", "\t * (This node represents the SELECT thru GROUP BY clauses. We", "\t * appear in the FromList of a SelectNode generated to represent", "\t * the actual HAVING clause.", "\t */", "\tpublic void markAsForHavingClause()", "\t{", "\t\tgeneratedForHavingClause = true;", "\t}", "" ] }, { "added": [ "\t\treturn super.getFromTableByName(name, schemaName, exactMatch);" ], "header": "@@ -162,14 +129,7 @@ public class FromSubquery extends FromTable", "removed": [ "\t\tif (generatedForGroupByClause || generatedForHavingClause)", "\t\t{", "\t\t\treturn subquery.getFromTableByName(name, schemaName, exactMatch);", "\t\t}", "\t\telse ", "\t\t{", "\t\t\treturn super.getFromTableByName(name, schemaName, exactMatch);", "\t\t}" ] }, { "added": [ "\t\t * to subquery.bindExpressions() and .bindResultColumns()", "\t\tnestedFromList = emptyFromList;" ], "header": "@@ -249,15 +209,10 @@ public class FromSubquery extends FromTable", "removed": [ "\t\t * to subquery.bindExpressions() and .bindResultColumns(). However,", "\t\t * the parser rewrites queries which have GROUP BY and HAVING clauses.", "\t\t * For these rewritten pseudo-subqueries, we need to pass in the outer FromList", "\t\t * which contains correlated tables.", "\t\tif ( generatedForGroupByClause || generatedForHavingClause )", "\t\t{ nestedFromList = fromListParam; }", "\t\telse { nestedFromList = emptyFromList; }" ] }, { "added": [ "\t\t// post 681, 1 may be no longer needed. 5 is the default case", "\t\t// now but what happens if the condition is false? Investigate." ], "header": "@@ -323,85 +278,12 @@ public class FromSubquery extends FromTable", "removed": [ "\t\t/* We have 5 cases here:", "\t\t * 1. 
ColumnReference was generated to replace an aggregate.", "\t\t *\t\t(We are the wrapper for a HAVING clause and the ColumnReference", "\t\t *\t\twas generated to reference the aggregate which was pushed down into", "\t\t *\t\tthe SELECT list in the user's query.) ", "\t\t *\t\tJust do what you would expect. Try to resolve the", "\t\t *\t\tColumnReference against our RCL if the ColumnReference is unqualified", "\t\t *\t\tor if it is qualified with our exposed name.", "\t\t *\t2.\tWe are the wrapper for a GROUP BY and a HAVING clause and", "\t\t *\t\teither the ColumnReference is qualified or it is in", "\t\t *\t\tthe HAVING clause. For example:", "\t\t *\t\t\tselect a from t1 group by a having t1.a = 1", "\t\t *\t\t\tselect a as asdf from t1 group by a having a = 1", "\t\t *\t\tWe need to match against the underlying FromList and then find", "\t\t *\t\tthe grandparent ResultColumn in our RCL so that we return a", "\t\t *\t\tResultColumn from the correct ResultSetNode. It is okay not to", "\t\t *\t\tfind a matching grandparent node. In fact, this is how we ensure", "\t\t *\t\tthe correct semantics for ColumnReferences in the HAVING clause", "\t\t *\t\t(which must be bound against the GROUP BY list.)", "\t\t * 3.\tWe are the wrapper for a HAVING clause without a GROUP BY and", "\t\t *\t\tthe ColumnReference is from the HAVING clause. ColumnReferences", "\t\t *\t\tare invalid in this case, so we return null.", "\t\t * 4. We are the wrapper for a GROUP BY with no HAVING. This has", "\t\t *\t\tto be a separate case because of #5 and the following query:", "\t\t *\t\t\tselect * from (select c1 from t1) t, (select c1 from t1) tt", "\t\t *\t\t\tgroup by t1.c1, tt.c1", "\t\t *\t\t(The correlation names are lost in the generated FromSuquery.)", "\t\t * 5. Everything else - do what you would expect. Try to resolve the", "\t\t *\t\tColumnReference against our RCL if the ColumnReference is unqualified", "\t\t *\t\tor if it is qualified with our exposed name.", "\t\t */", "\t\telse if (generatedForGroupByClause && generatedForHavingClause &&", "\t\t\t (columnsTableName != null || ", "\t\t\t columnReference.getClause() != ValueNode.IN_SELECT_LIST)) // 2", "\t\t{", "\t\t\tif (SanityManager.DEBUG)", "\t\t\t{", "\t\t\t\tSanityManager.ASSERT(correlationName == null,", "\t\t\t\t\t\"correlationName expected to be null\");", "\t\t\t\tSanityManager.ASSERT(subquery instanceof SelectNode,", "\t\t\t\t\t\"subquery expected to be instanceof SelectNode, not \" +", "\t\t\t\t\tsubquery.getClass().getName());", "\t\t\t}", "", "\t\t\tSelectNode\t\tselect = (SelectNode) subquery;", "", "\t\t\tresultColumn = select.getFromList().bindColumnReference(columnReference);", "", "\t\t\t/* Find and return the matching RC from our RCL.", "\t\t\t * (Not an error if no match found. Let ColumnReference deal with it.", "\t\t\t */", "\t\t\tif (resultColumn != null)", "\t\t\t{", "\t\t\t\t/* Is there a matching resultColumn in the subquery's RCL? */", "\t\t\t\tresultColumn = subquery.getResultColumns().findParentResultColumn(", "\t\t\t\t\t\t\t\t\t\t\t\tresultColumn);", "\t\t\t\tif (resultColumn != null)", "\t\t\t\t{", "\t\t\t\t\t/* Is there a matching resultColumn in our RCL? */", "\t\t\t\t\tresultColumn = resultColumns.findParentResultColumn(", "\t\t\t\t\t\t\t\t\t\t\t\tresultColumn);", "\t\t\t\t}", "\t\t\t}", "\t\t}", "\t\telse if ((generatedForHavingClause && ! 
generatedForGroupByClause) // 3", "\t\t\t && (columnReference.getClause() != ValueNode.IN_SELECT_LIST) )", "\t\t{", "\t\t resultColumn = null;", "\t\t}", "\t\telse if (generatedForGroupByClause) // 4", "\t\t{", "\t\t resultColumn = resultColumns.getResultColumn(", "\t\t\t\t\t\t\t\t columnsTableName,", "\t\t\t\t\t\t\t\t columnReference.getColumnName());", "\t\t}" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SelectNode.java", "hunks": [ { "added": [], "header": "@@ -112,9 +112,6 @@ public class SelectNode extends ResultSetNode", "removed": [ "\tprivate boolean generatedForGroupByClause;", "\tprivate boolean generatedForHavingClause;", "" ] }, { "added": [], "header": "@@ -159,8 +156,6 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\t\t\t\"generatedForGroupByClause: \" +generatedForGroupByClause +\"\\n\" +", "\t\t\t\t\"generatedForHavingClause: \" + generatedForHavingClause + \"\\n\" +" ] }, { "added": [], "header": "@@ -189,32 +184,6 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t/**", "\t * Mark this SelectNode as being generated for a GROUP BY clause.", "\t */", "\tpublic void markAsForGroupByClause()", "\t{", "\t\tgeneratedForGroupByClause = true;", "\t}", "", "\t/**", "\t * Return whether or not this SelectNode was generated for a GROUP BY clause.", "\t *", "\t * @return boolean\tWhether or not this SelectNode was generated for a GROUP BY clause.", "\t */", "\tpublic boolean getGeneratedForGroupbyClause()", "\t{", "\t\treturn generatedForGroupByClause;", "\t}", "", "\t/**", "\t * Mark this SelectNode as being generated for a HAVING clause.", "\t */", "\tpublic void markAsForHavingClause()", "\t{", "\t\tgeneratedForHavingClause = true;", "\t}", "" ] }, { "added": [ "\t\t\t\treturn whereAggregates;", "\t\t\t\treturn null;" ], "header": "@@ -364,24 +333,10 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\t\t\tif (generatedForHavingClause)", "\t\t\t\t{", "\t\t\t\t\treturn null;", "\t\t\t\t}", "\t\t\t\telse", "\t\t\t\t{", "\t\t\t\t\treturn whereAggregates;", "\t\t\t\t}", "\t\t\t\tif (generatedForHavingClause)", "\t\t\t\t{", "\t\t\t\t\treturn whereAggregates;", "\t\t\t\t}", "\t\t\t\telse", "\t\t\t\t{", "\t\t\t\t\treturn null;", "\t\t\t\t}" ] }, { "added": [ "\t\t\tif (whereAggregates.size() > 0)" ], "header": "@@ -542,8 +497,7 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\t\tif ((whereAggregates.size() > 0) &&", "\t\t\t\t\t!generatedForHavingClause)" ] }, { "added": [], "header": "@@ -781,14 +735,6 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\t/* Select * always okay when SelectNode generated to wrap", "\t\t * GROUP BY or HAVING.", "\t\t */", "\t\tif (generatedForGroupByClause || generatedForHavingClause)", "\t\t{", "\t\t\treturn;", "\t\t}", "" ] }, { "added": [ "\t\tif ((groupByList != null) || (havingClause != null))" ], "header": "@@ -1255,7 +1201,7 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\tif ((groupByList != null) || generatedForHavingClause)" ] }, { "added": [ "\t\tif (groupByList != null || havingClause != null)" ], "header": "@@ -1933,7 +1879,7 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\tif (groupByList != null || generatedForHavingClause)" ] } ] } ]
derby-DERBY-2442-b3bf9cab
DERBY-2442: Remove code made redundant by DERBY 681. Patch contributed by Manish Khettry. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@520885 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumnList.java", "hunks": [ { "added": [], "header": "@@ -1582,11 +1582,6 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ "\t\t\t\t/* Make sure that every RC and expression is marked as being in", "\t\t\t\t * the SELECT list.", "\t\t\t\t */", "\t\t\t\tallExpansion.setClause(ValueNode.IN_SELECT_LIST);", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/SelectNode.java", "hunks": [ { "added": [], "header": "@@ -318,36 +318,6 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t/**", "\t * Return the specified aggregate vector for this SelectNode.", "\t *", "\t * @param clause\tWhich clause to get the aggregate list for", "\t *", "\t * @return aggregateVector\tThe specified aggregate vector for this SelectNode.", "\t */", "\tpublic Vector getAggregateVector(int clause)", "\t{", "\t\tswitch (clause)", "\t\t{", "\t\t\tcase ValueNode.IN_SELECT_LIST:", "\t\t\t\treturn selectAggregates;", "", "\t\t\tcase ValueNode.IN_WHERE_CLAUSE:", "\t\t\t\treturn whereAggregates;", "", "\t\t\tcase ValueNode.IN_HAVING_CLAUSE:", "\t\t\t\treturn null;", "", "\t\t\tdefault:", "\t\t\t\tif (SanityManager.DEBUG)", "\t\t\t\t{", "\t\t\t\t\tSanityManager.ASSERT(false,", "\t\t\t\t\t\t\"Unexpected value for clause\");", "\t\t\t\t}", "\t\t\t\treturn null;", "\t\t}", "\t}", "" ] }, { "added": [], "header": "@@ -460,7 +430,6 @@ public class SelectNode extends ResultSetNode", "removed": [ "\t\tresultColumns.setClause(ValueNode.IN_SELECT_LIST);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ValueNode.java", "hunks": [ { "added": [], "header": "@@ -66,15 +66,9 @@ import java.util.Vector;", "removed": [ "\tpublic static final int IN_UNKNOWN_CLAUSE = 0;", "\tpublic static final int IN_SELECT_LIST = 1;", "\tpublic static final int IN_WHERE_CLAUSE = 2;", "\tpublic static final int IN_HAVING_CLAUSE = 3;", "", "\tprotected int\t\t\t\tclause = IN_UNKNOWN_CLAUSE;" ] }, { "added": [], "header": "@@ -165,7 +159,6 @@ public abstract class ValueNode extends QueryTreeNode", "removed": [ "\t\t\t\t\"clause: \" + clause + \"\\n\" +" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ValueNodeList.java", "hunks": [ { "added": [], "header": "@@ -75,22 +75,6 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [ "\t/**", "\t * Set the clause that this node appears in.", "\t *", "\t * @param clause\tThe clause that this node appears in.", "\t */", "\tpublic void setClause(int clause)", "\t{", "\t\tint size = size();", "", "\t\tfor (int index = 0; index < size; index++)", "\t\t{", "\t\t\tValueNode\t\tvalueNode;", "\t\t\t\tvalueNode = (ValueNode) elementAt(index);", "\t\t\tvalueNode.setClause(clause);", "\t\t}", "\t}" ] } ] } ]
derby-DERBY-2443-89459c8f
DERBY-2443: Implement ResultSet updateClob/updateBlob methods on the NetworkClient Patch contributed by V. Narayanan. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@522873 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ResultSet.java", "hunks": [ { "added": [ " /**", " * Updates the designated column with a <code>java.sql.Blob</code> value.", " * The updater methods are used to update column values in the", " * current row or the insert row. The updater methods do not", " * update the underlying database; instead the <code>updateRow</code> or", " * <code>insertRow</code> methods are called to update the database.", " *", " * @param columnIndex the first column is 1, the second is 2, ...", " * @param x the new column value", " * @throws SQLException if the columnIndex is not valid;", " * if a database access error occurs;", " * the result set concurrency is <code>CONCUR_READ_ONLY</code>", " * or this method is called on a closed result set", " */", " synchronized (connection_) {", " if (agent_.loggingEnabled()) {", " agent_.logWriter_.traceEntry(this, \"updateBlob\",", " columnIndex, x);", " }", " try {", " checkUpdatePreconditions(columnIndex, \"updateBlob\");", " updateColumn(columnIndex,", " agent_.crossConverters_.setObject(", " resultSetMetaData_.types_[columnIndex -1],", " x));", " } catch (SqlException se) {", " throw se.getSQLException();", " }", " }", " /**", " * Updates the designated column with a <code>java.sql.Blob</code> value.", " * The updater methods are used to update column values in the", " * current row or the insert row. The updater methods do not", " * update the underlying database; instead the <code>updateRow</code> or", " * <code>insertRow</code> methods are called to update the database.", " *", " * @param columnName the label for the column specified with the SQL AS", " * clause. If the SQL AS clause was not specified, then the label is the", " * name of the column", " * @param x the new column value", " * @throws SQLException if the columnLabel is not valid;", " * if a database access error occurs;", " * the result set concurrency is <code>CONCUR_READ_ONLY</code>", " * or this method is called on a closed result set", " */", " try {", " updateBlob(findColumnX(columnName), x);", " } catch (SqlException se) {", " throw se.getSQLException();", " }", " ", " /**", " * Updates the designated column using the given input stream, which", " * will have the specified number of bytes.", " * The updater methods are used to update column values in the", " * current row or the insert row. 
The updater methods do not", " * update the underlying database; instead the <code>updateRow</code> or", " * <code>insertRow</code> methods are called to update the database.", " *", " * @param columnIndex the first column is 1, the second is 2, ...", " * @param x An object that contains the data to set the parameter", " * value to.", " * @param length the number of bytes in the parameter data.", " * @exception SQLException if the columnIndex is not valid;", " * if a database access error occurs;", " * the result set concurrency is <code>CONCUR_READ_ONLY</code>", " * or this method is called on a closed result set", " */", " public void updateBlob(int columnIndex, InputStream x, long length)", " throws SQLException {", " synchronized (connection_) {", " if (agent_.loggingEnabled()) {", " agent_.logWriter_.traceEntry(this, \"updateBlob\",", " columnIndex, x, (int)length);", " }", " try {", " checkUpdatePreconditions(columnIndex, \"updateBlob\");", " updateColumn(columnIndex,", " agent_.crossConverters_.setObject(", " resultSetMetaData_.types_[columnIndex -1],", " new Blob(agent_, x, (int)length)));", " } catch (SqlException se) {", " throw se.getSQLException();", " }", " }", " /**", " * Updates the designated column using the given input stream, which", " * will have the specified number of bytes.", " * The updater methods are used to update column values in the", " * current row or the insert row. The updater methods do not", " * update the underlying database; instead the <code>updateRow</code> or", " * <code>insertRow</code> methods are called to update the database.", " *", " * @param columnName the label for the column specified with the", " * SQL AS clause. If the SQL AS clause was not specified, then the", " * label is the name of the column", " * @param x An object that contains the data to set the parameter", " * value to.", " * @param length the number of bytes in the parameter data.", " * @exception SQLException if the columnLabel is not valid;", " * if a database access error occurs;", " * the result set concurrency is <code>CONCUR_READ_ONLY</code>", " * or this method is called on a closed result set", " */", " public void updateBlob(String columnName, InputStream x, long length)", " throws SQLException {", " try {", " updateBlob(findColumnX(columnName), x, length);", " } catch (SqlException se) {", " throw se.getSQLException();", " }" ], "header": "@@ -3805,20 +3805,124 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " throw jdbc3MethodNotSupported();", " throw jdbc3MethodNotSupported();", "", " public void updateClob(int columnIndex, java.sql.Clob x) throws SQLException {", " throw jdbc3MethodNotSupported();", " public void updateClob(String columnName, java.sql.Clob x) throws SQLException {", " throw jdbc3MethodNotSupported();" ] } ] } ]
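From an application's point of view, the methods added above are the standard JDBC updater calls. A hedged usage sketch against a made-up table (the URL, table name DOCS, and BLOB column DATA are placeholders; the result set must be opened with CONCUR_UPDATABLE):

    import java.io.ByteArrayInputStream;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class UpdateBlobSketch {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection(
                    "jdbc:derby://localhost:1527/sampledb;create=true");
            Statement stmt = conn.createStatement(
                    ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_UPDATABLE);
            ResultSet rs = stmt.executeQuery("SELECT ID, DATA FROM DOCS");
            byte[] payload = new byte[] { 1, 2, 3, 4 };
            while (rs.next()) {
                // Updater methods only change the current row in the result set;
                // updateRow() is what actually writes the change to the database.
                rs.updateBlob("DATA", new ByteArrayInputStream(payload), payload.length);
                rs.updateRow();
            }
            rs.close();
            stmt.close();
            conn.close();
        }
    }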
derby-DERBY-2444-c9382f06
DERBY-2444: Implement not implemented methods Blob.getBinaryStream(long pos, long length) and Clob.getCharacterStream(long pos, long length) in the Network Client Patch contributed by V. Narayanan. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@528546 13f79535-47bb-0310-9956-ffa450edef68
[]
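This record has no diff hunks here, but the methods it names are the JDBC 4 sub-stream accessors. A usage sketch with placeholder table and column names, showing the 1-based (position, length) call shape:

    import java.io.InputStream;
    import java.sql.Blob;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class LobSubStreamSketch {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection(
                    "jdbc:derby://localhost:1527/sampledb");
            Statement stmt = conn.createStatement();
            ResultSet rs = stmt.executeQuery("SELECT DATA FROM DOCS");
            while (rs.next()) {
                Blob blob = rs.getBlob(1);
                if (blob == null) {
                    continue;
                }
                // read at most 16 bytes starting at the first byte of the BLOB
                long len = Math.min(16L, blob.length());
                InputStream part = blob.getBinaryStream(1L, len);
                part.close();
                blob.free();
            }
            rs.close();
            stmt.close();
            conn.close();
        }
    }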
derby-DERBY-2446-504ba175
DERBY-2446; Remove notion of the old test harness from TestConfiguration Removing remaining references, and also removing remaining files from tests/junitTests git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1531729 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [], "header": "@@ -131,13 +131,6 @@ public final class TestConfiguration {", "removed": [ " /**", " * Default Derby test configuration object based", " * upon system properties set by the old harness.", " */", " private static final TestConfiguration DERBY_HARNESS_CONFIG = ", " new TestConfiguration(getSystemProperties());", " " ] }, { "added": [ " static {", " DEFAULT_CONFIG = JUNIT_CONFIG;", " final File dsh = new File(\"system\");", " BaseTestCase.setSystemProperty(", " \"derby.system.home\", dsh.getAbsolutePath());" ], "header": "@@ -149,43 +142,14 @@ public final class TestConfiguration {", "removed": [ " ", " /**", " * Are we running in the harness, assume so if framework", " * was set so the ", " */", " private static final boolean runningInDerbyHarness;", " ", " static {", " boolean assumeHarness = false;", " // In the harness if the default configuration according", " // to system properties is not embedded.", " if (!DERBY_HARNESS_CONFIG.getJDBCClient().isEmbedded())", " assumeHarness = true;", " ", " // Assume harness if database name is not default", " if (!DERBY_HARNESS_CONFIG.getDefaultDatabaseName().equals(DEFAULT_DBNAME))", " assumeHarness = true;", " ", " // Assume harness if user name is not default", " if (!DERBY_HARNESS_CONFIG.getUserName().equals(DEFAULT_USER_NAME))", " assumeHarness = true;", " ", " // If derby.system.home set externally at startup assume", " // running in harness", " if (BaseTestCase.getSystemProperty(\"derby.system.home\") != null)", " assumeHarness = true;", "", " DEFAULT_CONFIG = assumeHarness ? DERBY_HARNESS_CONFIG : JUNIT_CONFIG;", " runningInDerbyHarness = assumeHarness;", " if (!assumeHarness) {", " final File dsh = new File(\"system\");", " BaseTestCase.setSystemProperty(", " \"derby.system.home\", dsh.getAbsolutePath());", " }" ] }, { "added": [ " //public static boolean runningInDerbyHarness()", " //{", " // return runningInDerbyHarness;", " //}" ], "header": "@@ -1999,10 +1963,10 @@ public final class TestConfiguration {", "removed": [ " public static boolean runningInDerbyHarness()", " {", " return runningInDerbyHarness;", " }" ] } ] } ]
derby-DERBY-2447-ad0c80a9
DERBY-2447: ejbql and floattypes fail intermittently A bug in the HotSpot optimization made normalization of floating point values return negative zero instead of positive zero. This patch adds a workaround for the bug and a regression test. The workaround also makes the intent of the code clearer. For details about the HotSpot bug, see: http://bugs.sun.com/view_bug.do?bug_id=6833879 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@772449 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/NumberDataType.java", "hunks": [ { "added": [ " // DERBY-2447: It shouldn't matter whether we compare to 0.0f or -0.0f,", " // both should match negative zero, but comparing to 0.0f triggered", " // this JVM bug: http://bugs.sun.com/view_bug.do?bug_id=6833879", " if (v == -0.0f) v = 0.0f;" ], "header": "@@ -493,7 +493,10 @@ public abstract class NumberDataType extends DataType", "removed": [ " if (v == 0.0f) v = 0.0f;" ] }, { "added": [ " // DERBY-2447: It shouldn't matter whether we compare to 0.0d or -0.0d,", " // both should match negative zero, but comparing to 0.0d triggered", " // this JVM bug: http://bugs.sun.com/view_bug.do?bug_id=6833879", " if (v == -0.0d) v = 0.0d;" ], "header": "@@ -519,7 +522,10 @@ public abstract class NumberDataType extends DataType", "removed": [ " if (v == 0.0d) v = 0.0d;" ] } ] } ]
derby-DERBY-2457-e6d28aa0
DERBY-2457: Use of column aliases in GROUP BY / HAVING clauses may fail Some use of column aliases in group by / having clauses can cause queries to fail with error 42X04. The queries can sometimes be made to work by also aliasing the table or rewriting the query to use a subselect. After analyzing the script, and studying the standard, we came to feel that Derby was behaving correctly according to the standard. While column aliases are valid in the ORDER BY clause, they are not valid in the GROUP BY and HAVING clauses. Instead, Derby currently correctly enforces the standard's requirement that the underlying column name be used in these clauses. This change updates the Derby GroupByTest.java test program to contain the examples from the reproduction script, demonstrating that Derby's behavior is correct, and adding to the body of GROUP BY test cases. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@655947 13f79535-47bb-0310-9956-ffa450edef68
[]
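The record above changes only a test, but the behavior it documents is easy to show: a column alias is accepted in ORDER BY yet rejected in GROUP BY and HAVING, where the underlying column name must be used. A sketch against a made-up table (the embedded JDBC URL is illustrative, and the expected 42X04 state is taken from the commit message):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class GroupByAliasSketch {
        public static void main(String[] args) throws SQLException {
            Connection conn = DriverManager.getConnection("jdbc:derby:aliasdb;create=true");
            Statement s = conn.createStatement();
            s.execute("CREATE TABLE T1 (A INT, B INT)");
            s.execute("INSERT INTO T1 VALUES (1, 10), (1, 20), (2, 30)");

            try {
                // alias used in GROUP BY: rejected per the standard
                s.executeQuery("SELECT A AS ALIAS, SUM(B) FROM T1 GROUP BY ALIAS");
            } catch (SQLException e) {
                System.out.println("expected failure: " + e.getSQLState());
            }

            // underlying column name used in GROUP BY: accepted
            s.executeQuery("SELECT A AS ALIAS, SUM(B) FROM T1 GROUP BY A").close();

            // alias in ORDER BY is fine
            s.executeQuery("SELECT A AS ALIAS FROM T1 ORDER BY ALIAS").close();

            s.close();
            conn.close();
        }
    }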
derby-DERBY-246-88c25bba
DERBY-339 Network client XA should only keep XA state for transaction branch association Network client XA should only keep XA state for transaction branch association, to track whether to send commit in autocommit mode. All other state and state-related decisions should be deferred to the server. The client tries to track XA state to make decisions based on the current XA state. Most of this state handling was removed with DERBY-246, but it still was not being handled properly, as evidenced by multiple failures in xaSimplePositive now that it gets past DERBY-246. This fix will have the client track only branch association as outlined in the XA+ specification, Table 6-2, State Table for Transaction Branch Association. The client will track only XA_T0_NOT_ASSOCIATED and XA_T1_ASSOCIATED; Association Suspended (T2) will map to XA_T0_NOT_ASSOCIATED for the client's purposes. The client will commit in autocommit mode only in the XA_T0_NOT_ASSOCIATED state. git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@189710 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/Connection.java", "hunks": [ { "added": [ " // The client needs to keep track of the connection's transaction branch association", " // per table 2.6 in the XA+ specification in order to determine if commits should flow in", " // autocommit mode. There is no need to keep track of suspended transactions separately from", " // XA_TO_NOT_ASSOCIATED.", " // ", " /**", " * <code>XA_T0_NOT_ASSOCIATED</code>", " * This connection is not currently associated with an XA transaction", " * In this state commits will flow in autocommit mode.", " */", " public static final int XA_T0_NOT_ASSOCIATED = 0; ", " ", " /**", " * <code>XA_T1_ASSOCIATED</code>", " * In this state commits will not flow in autocommit mode.", " */", " public static final int XA_T1_ASSOCIATED = 1; ", " ", " //TODO: Remove XA_RECOVER entirely once indoubtlist is gone. ", " protected int xaState_ = XA_T0_NOT_ASSOCIATED;" ], "header": "@@ -85,13 +85,29 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " public static final int XA_LOCAL = 0; // No global transaction in process", " public static final int XA_GLOBAL = 1; // Global transaction in process", " //TODO: Remove entirely once indoubtlist is gone. ", " protected int xaState_ = XA_LOCAL;" ] }, { "added": [ " if ((xaState_ == XA_T0_NOT_ASSOCIATED) ) {" ], "header": "@@ -524,7 +540,7 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " if ((xaState_ == XA_LOCAL) ) {" ] }, { "added": [ " if ((xaState_ == XA_T0_NOT_ASSOCIATED) ) {" ], "header": "@@ -541,7 +557,7 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " if ((xaState_ == XA_LOCAL) ) {" ] } ] }, { "file": "java/client/org/apache/derby/client/am/Statement.java", "hunks": [ { "added": [ " return (connection_.xaState_ == Connection.XA_T0_NOT_ASSOCIATED) ;" ], "header": "@@ -1189,7 +1189,7 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " return (connection_.xaState_ == Connection.XA_LOCAL) ;" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetConnection.java", "hunks": [ { "added": [ " \tif (xaState_ == XA_T0_NOT_ASSOCIATED) {" ], "header": "@@ -1367,7 +1367,7 @@ public class NetConnection extends org.apache.derby.client.am.Connection {", "removed": [ " \tif (xaState_ == XA_LOCAL) {" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetXAConnection.java", "hunks": [ { "added": [ " if (xaState == XA_T0_NOT_ASSOCIATED){" ], "header": "@@ -132,7 +132,7 @@ public class NetXAConnection extends org.apache.derby.client.net.NetConnection {", "removed": [ " if (xaState == XA_LOCAL){" ] }, { "added": [ " if (xaState == XA_T0_NOT_ASSOCIATED) {" ], "header": "@@ -147,7 +147,7 @@ public class NetXAConnection extends org.apache.derby.client.net.NetConnection {", "removed": [ " if (xaState == XA_LOCAL) {" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetXAResource.java", "hunks": [ { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -183,7 +183,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] }, { "added": [], "header": "@@ -215,9 +215,6 @@ public class NetXAResource implements XAResource {", "removed": [ " else {", " \tconn_.setXAState(Connection.XA_LOCAL);", " }" ] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -247,7 +244,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if 
(conn_.isPhysicallyClosed()) {" ] }, { "added": [ " }else {", " \tconn_.setXAState(Connection.XA_T0_NOT_ASSOCIATED);" ], "header": "@@ -278,8 +275,9 @@ public class NetXAResource implements XAResource {", "removed": [ "" ] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -299,7 +297,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -352,7 +350,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -381,7 +379,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -447,7 +445,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -512,7 +510,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] }, { "added": [ " " ], "header": "@@ -543,9 +541,7 @@ public class NetXAResource implements XAResource {", "removed": [ " else {", " \tconn_.setXAState(Connection.XA_LOCAL);", " }" ] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -585,7 +581,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] }, { "added": [ " conn_.setXAState(Connection.XA_T1_ASSOCIATED);" ], "header": "@@ -609,7 +605,7 @@ public class NetXAResource implements XAResource {", "removed": [ " conn_.setXAState(Connection.XA_GLOBAL);" ] }, { "added": [], "header": "@@ -626,7 +622,6 @@ public class NetXAResource implements XAResource {", "removed": [ " // By default, throwXAException will reset the state of the failed connection" ] }, { "added": [ " setXaStateForXAException(rc); ", "", " /**", " * Reset the transaction branch association state to XA_T0_NOT_ASSOCIATED", " * for XAER_RM* and XA_RB* Exceptions. 
All other exeptions leave the state ", " * unchanged", " * ", " * @param rc // return code from XAException", " * @throws XAException", " */", " private void setXaStateForXAException(int rc) {", " \tswitch (rc)", "\t\t{", " \t// Reset to T0, not associated for XA_RB*, RM*", " // XAER_RMFAIL and XAER_RMERR will be fatal to the connection", " // but that is not dealt with here", " case javax.transaction.xa.XAException.XAER_RMFAIL:", " case javax.transaction.xa.XAException.XAER_RMERR:", " case javax.transaction.xa.XAException.XA_RBROLLBACK:", " case javax.transaction.xa.XAException.XA_RBCOMMFAIL:", " case javax.transaction.xa.XAException.XA_RBDEADLOCK:", " case javax.transaction.xa.XAException.XA_RBINTEGRITY:", " case javax.transaction.xa.XAException.XA_RBOTHER:", " case javax.transaction.xa.XAException.XA_RBPROTO:", " case javax.transaction.xa.XAException.XA_RBTIMEOUT:", " case javax.transaction.xa.XAException.XA_RBTRANSIENT:", " \tconn_.setXAState(Connection.XA_T0_NOT_ASSOCIATED);", " break;", " // No change for other XAExceptions", " // javax.transaction.xa.XAException.XA_NOMIGRATE", " //javax.transaction.xa.XAException.XA_HEURHAZ", " // javax.transaction.xa.XAException.XA_HEURCOM", " // javax.transaction.xa.XAException.XA_HEURRB", " // javax.transaction.xa.XAException.XA_HEURMIX", " // javax.transaction.xa.XAException.XA_RETRY", " // javax.transaction.xa.XAException.XA_RDONLY", " // javax.transaction.xa.XAException.XAER_ASYNC", " // javax.transaction.xa.XAException.XAER_NOTA", " // javax.transaction.xa.XAException.XAER_INVAL ", " // javax.transaction.xa.XAException.XAER_PROTO", " // javax.transaction.xa.XAException.XAER_DUPID", " // javax.transaction.xa.XAException.XAER_OUTSIDE \t", " default:", " \t\t\t return;", "\t\t}\t", " }", "" ], "header": "@@ -735,9 +730,56 @@ public class NetXAResource implements XAResource {", "removed": [] }, { "added": [ " if (conn_.isPhysicalConnClosed()) {" ], "header": "@@ -745,7 +787,7 @@ public class NetXAResource implements XAResource {", "removed": [ " if (conn_.isPhysicallyClosed()) {" ] } ] } ]
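The constants added in the Connection hunk reduce the client's bookkeeping to the branch association states from the XA+ specification, and the Statement/Connection checks let commits flow in autocommit mode only while no branch is associated. A condensed, stand-alone sketch of that decision (class and method names here are invented, not the actual client classes):

    public class XaAssociationSketch {

        // Only two states are tracked; a suspended association is treated as not associated.
        static final int XA_T0_NOT_ASSOCIATED = 0;
        static final int XA_T1_ASSOCIATED = 1;

        private int xaState = XA_T0_NOT_ASSOCIATED;
        private boolean autoCommit = true;

        boolean willAutoCommit() {
            // Mirrors the checks in the patch: commit in autocommit mode only when the
            // connection is not associated with a global transaction branch.
            return autoCommit && xaState == XA_T0_NOT_ASSOCIATED;
        }

        void xaStart() { xaState = XA_T1_ASSOCIATED; }     // xa_start associates the branch
        void xaEnd()   { xaState = XA_T0_NOT_ASSOCIATED; } // xa_end (or rollback-only errors) dissociates it

        public static void main(String[] args) {
            XaAssociationSketch c = new XaAssociationSketch();
            System.out.println(c.willAutoCommit()); // true: local work, commits flow
            c.xaStart();
            System.out.println(c.willAutoCommit()); // false: inside a global transaction
            c.xaEnd();
            System.out.println(c.willAutoCommit()); // true again
        }
    }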
derby-DERBY-2462-1e762f52
DERBY-2462 Committed DERBY-2462-4, which fixes this issue and improves the test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@538572 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.reference.SQLState;" ], "header": "@@ -24,6 +24,7 @@ import java.util.Enumeration;", "removed": [] }, { "added": [ " private final boolean keepAfterCommit;" ], "header": "@@ -63,6 +64,7 @@ public class DiskHashtable", "removed": [] }, { "added": [ " this.keepAfterCommit = keepAfterCommit;" ], "header": "@@ -88,6 +90,7 @@ public class DiskHashtable", "removed": [] }, { "added": [ " private RowLocation rowloc;", " keepAfterCommit," ], "header": "@@ -387,13 +390,14 @@ public class DiskHashtable", "removed": [ " false, // do not hold" ] }, { "added": [ " } else if (keepAfterCommit) {", " rowloc = rowConglomerate.newRowLocationTemplate();", " scan.fetchLocation(rowloc);" ], "header": "@@ -408,6 +412,9 @@ public class DiskHashtable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/store/access/TransactionController.java", "hunks": [ { "added": [ " * @param keepAfterCommit If true then the hash table is kept after a", " * commit" ], "header": "@@ -901,6 +901,8 @@ public interface TransactionController", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/HashScanResultSet.java", "hunks": [ { "added": [ "\tprivate boolean keepAfterCommit;" ], "header": "@@ -104,6 +104,7 @@ public class HashScanResultSet extends ScanResultSet", "removed": [] }, { "added": [ "\t\tthis.keepAfterCommit = activation.getResultSetHoldability();" ], "header": "@@ -187,6 +188,7 @@ public class HashScanResultSet extends ScanResultSet", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/BackingStoreHashTableFromScan.java", "hunks": [ { "added": [ "\t\tboolean\t\t\t\t\tskipNullKeyColumns,", " boolean keepAfterCommit)" ], "header": "@@ -83,7 +83,8 @@ class BackingStoreHashTableFromScan extends BackingStoreHashtable", "removed": [ "\t\tboolean\t\t\t\t\tskipNullKeyColumns)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/RAMTransaction.java", "hunks": [ { "added": [ " boolean\t\t skipNullKeyColumns,", " boolean keepAfterCommit)" ], "header": "@@ -1367,7 +1367,8 @@ public class RAMTransaction", "removed": [ " boolean\t\t skipNullKeyColumns)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/conglomerate/GenericScanController.java", "hunks": [ { "added": [ "scan_state - a scan has 5 possible states:", " SCAN_INIT, SCAN_INPROGRESS, SCAN_DONE, SCAN_HOLD_INIT, and", " SCAN_HOLD_INPROGRESS" ], "header": "@@ -65,8 +65,9 @@ scan_position - this variable holds the current scan position, it may be", "removed": [ "scan_state - a scan has 3 possible states: ", " SCAN_INIT, SCAN_INPROGRESS, SCAN_DONE" ] } ] } ]
derby-DERBY-2465-3568cea1
DERBY-378 (partial) This patch adds some code required to support import/export of lob data. 1) Added code to read clob data using getCharacterStream() instead of getString() while importing clob data from an external file. (Note: Clobs are read using getString() until DERBY-2465 is fixed). 2) Made some code changes so that each lob column has its own file handle to the lob file to read the data; otherwise streams can get corrupted when there is more than one clob/blob type column in the table. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@520197 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/load/ImportAbstract.java", "hunks": [ { "added": [ " if (isColumnInExtFile(columnIndex)) {", "\t\t\tval = importReadData.getClobColumnFromExtFileAsString(val, ", " columnIndex);" ], "header": "@@ -145,12 +145,13 @@ abstract class ImportAbstract extends VTITemplate {", "removed": [ "\t\tif (isColumnInExtFile(columnIndex)) {", "\t\t\tval = importReadData.getClobColumnFromExtFile(val);" ] }, { "added": [ "", " /**", " * Returns <code> java.sql.Clob </code> type object that ", " * contains the columnn data from the import file. ", " * @param columnIndex number of the column. starts at 1.", " * @exception SQLException if any occurs during create of the clob object.", " */", "\tpublic java.sql.Clob getClob(int columnIndex) throws SQLException {", "", " java.sql.Clob clob = null;", "\t\tif (lobsInExtFile) ", " {", " // lob data is in another file, read from the external file.", " clob = importReadData.getClobColumnFromExtFile(", " nextRow[columnIndex-1], columnIndex);", " } else {", " // data is in the main export file.", " String data = nextRow[columnIndex-1];", " if (data != null) {", " clob = new ImportClob(data); ", " }", " }", " ", " wasNull = (clob == null);", " return clob;", "\t}", "" ], "header": "@@ -160,6 +161,33 @@ abstract class ImportAbstract extends VTITemplate {", "removed": [] }, { "added": [ " nextRow[columnIndex-1], columnIndex);" ], "header": "@@ -174,7 +202,7 @@ abstract class ImportAbstract extends VTITemplate {", "removed": [ " nextRow[columnIndex-1]);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/load/ImportLobFile.java", "hunks": [ { "added": [ " " ], "header": "@@ -91,13 +91,7 @@ class ImportLobFile", "removed": [ "", " // setup a reader on top of the stream, so that calls ", " // to read the clob data from the file can read the ", " // with approapriate data code set. ", " lobReader = dataCodeset == null ?", " \t\tnew InputStreamReader(lobLimitIn) : ", " new InputStreamReader(lobLimitIn, dataCodeset); " ] }, { "added": [ " ", " // wrap a reader on top of the stream, so that calls ", " // to read the clob data from the file can read the ", " // with approapriate data code set. ", " lobReader = dataCodeset == null ?", " \t\tnew InputStreamReader(lobLimitIn) : ", " new InputStreamReader(lobLimitIn, dataCodeset); " ], "header": "@@ -128,6 +122,13 @@ class ImportLobFile", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/load/ImportReadData.java", "hunks": [ { "added": [ " private ImportLobFile[] lobFileHandles; // lob file handle object " ], "header": "@@ -105,7 +105,7 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [ " private ImportLobFile lobFile; // lob file object " ] }, { "added": [ "", " lobFileHandles = new ImportLobFile[numberOfColumns];", "" ], "header": "@@ -153,6 +153,9 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [] }, { "added": [ " if (lobFileHandles != null) {", " for (int i = 0 ; i < numberOfColumns ; i++) ", " {", " if(lobFileHandles[i] != null) ", " lobFileHandles[i].close();", " }", "" ], "header": "@@ -281,9 +284,14 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [ " if (lobFile !=null) {", " lobFile.close();" ] }, { "added": [ " ", " * @param colIndex number of the column. starts at 1. 
", " String getClobColumnFromExtFileAsString(String lobLocationStr, int colIndex) ", " initExternalLobFile(lobLocationStr, colIndex);", " return lobFileHandles[colIndex-1].getString(lobOffset,lobLength);", " }", " ", "\t\t}catch(Exception ex) {", "\t\t\tthrow LoadError.unexpectedError(ex);", "\t\t}", "\t}", "", "", " /**", " * Returns a clob columnn data stored at the specified location as", " * a java.sql.Clob object. ", " * @param lobLocationStr location of the clob data.", " * @param colIndex number of the column. starts at 1. ", " * @exception SQLException on any errors. ", " */", " java.sql.Clob getClobColumnFromExtFile(String lobLocationStr, int colIndex) ", " throws SQLException ", " {", "\t\ttry {", " initExternalLobFile(lobLocationStr, colIndex);", " if (lobLength == -1 ){", " // lob length -1 indicates columnn value is a NULL, ", " // just return null. ", " return null;", " } else {", " return new ImportClob(lobFileHandles[colIndex -1],", " lobOffset,lobLength);" ], "header": "@@ -934,22 +942,51 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [ " String getClobColumnFromExtFile(String lobLocationStr) ", " initExternalLobFile(lobLocationStr);", " return lobFile.getString(lobOffset,lobLength);" ] }, { "added": [ " * @param colIndex number of the column. starts at 1. ", " java.sql.Blob getBlobColumnFromExtFile(String lobLocationStr, int colIndex)", " initExternalLobFile(lobLocationStr, colIndex);", " return new ImportBlob(lobFileHandles[colIndex -1], ", " lobOffset, lobLength);" ], "header": "@@ -961,19 +998,21 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [ " java.sql.Blob getBlobColumnFromExtFile(String lobLocationStr)", " initExternalLobFile(lobLocationStr);", " return new ImportBlob(lobFile, lobOffset, lobLength);" ] }, { "added": [ " * @param colIndex number of the column. starts at 1.", " private void initExternalLobFile(String lobLocationStr, int colIndex) " ], "header": "@@ -983,9 +1022,10 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [ " private void initExternalLobFile(String lobLocationStr) " ] }, { "added": [ " if (lobFileHandles[colIndex-1] == null) {", " // each lob column in the table has it's own file handle. ", " // separate file handles are must, lob stream objects", " // can not be reused until the whole row is inserted.", " lobFileHandles[colIndex-1] = new ImportLobFile(lobFileName, ", " controlFileReader.getDataCodeset());" ], "header": "@@ -1004,12 +1044,14 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [ "", " if (lobFile == null) {", " lobFile = new ImportLobFile(lobFileName, ", " controlFileReader.getDataCodeset());" ] }, { "added": [], "header": "@@ -1020,5 +1062,3 @@ final class ImportReadData implements java.security.PrivilegedExceptionAction {", "removed": [ "", "" ] } ] } ]
derby-DERBY-2466-59a5a70a
DERBY-2466: Introduce system procedure which reloads the security policy file. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@522515 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/SystemProcedures.java", "hunks": [ { "added": [ "import java.security.AccessControlException;", "import java.security.AccessController;", "import java.security.PrivilegedExceptionAction;", "import java.security.Policy;" ], "header": "@@ -21,6 +21,10 @@", "removed": [] }, { "added": [ " public static class ReloadPolicyAction implements PrivilegedExceptionAction", " {", " public ReloadPolicyAction() {}", " ", " public Object run()", " throws Exception", " {", " Policy policy = Policy.getPolicy();", " ", " policy.refresh();", " ", " return null;", " }", " }" ], "header": "@@ -73,6 +77,20 @@ public class SystemProcedures {", "removed": [] } ] }, { "file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java", "hunks": [ { "added": [ "\t<LI>Security", "\t <UL>", "\t <LI> XK...", "\t </UL>", "" ], "header": "@@ -137,6 +137,11 @@ package org.apache.derby.shared.common.reference;", "removed": [] }, { "added": [ " ** Messages whose SQL states are prescribed by DRDA" ], "header": "@@ -1621,7 +1626,7 @@ public interface SQLState {", "removed": [ " ** Messages whose SQL states are proscribed by DRDA" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java", "hunks": [ { "added": [ "\tpublic SecurityManagerSetup(Test test, String policyResource)" ], "header": "@@ -59,7 +59,7 @@ public final class SecurityManagerSetup extends TestSetup {", "removed": [ "\tprivate SecurityManagerSetup(Test test, String policyResource)" ] }, { "added": [ " else if ( !externalSecurityManagerInstalled )", " {", " uninstallSecurityManager();", " }" ], "header": "@@ -117,6 +117,10 @@ public final class SecurityManagerSetup extends TestSetup {", "removed": [] }, { "added": [ "" ], "header": "@@ -140,7 +144,7 @@ public final class SecurityManagerSetup extends TestSetup {", "removed": [ "\t\t" ] }, { "added": [ "\t\t\tuninstallSecurityManager();" ], "header": "@@ -156,13 +160,7 @@ public final class SecurityManagerSetup extends TestSetup {", "removed": [ "\t\t\tAccessController.doPrivileged(new java.security.PrivilegedAction() {", "", "\t\t\t\tpublic Object run() {", "\t\t\t\t\tSystem.setSecurityManager(null);", "\t\t\t\t\treturn null;", "\t\t\t\t}", "\t\t\t});" ] }, { "added": [ "", " public Object run() {" ], "header": "@@ -178,7 +176,8 @@ public final class SecurityManagerSetup extends TestSetup {", "removed": [ "\t\t\tpublic Object run() {" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/SupportFilesSetup.java", "hunks": [ { "added": [ "", " public static final String EXTIN = \"extin\";", " public static final String EXTINOUT = \"extinout\";", " public static final String EXTOUT = \"extout\";" ], "header": "@@ -61,6 +61,10 @@ import junit.framework.Test;", "removed": [] }, { "added": [ " privCopyFiles(EXTIN, readOnly, readOnlyTargetFileNames);", " privCopyFiles(EXTINOUT, readWrite, readWriteTargetFileNames);", " privCopyFiles(EXTOUT, (String[]) null, (String[]) null);", " DropDatabaseSetup.removeDirectory(EXTIN);", " DropDatabaseSetup.removeDirectory(EXTINOUT);", " DropDatabaseSetup.removeDirectory(EXTOUT);", " public static void privCopyFiles(final String dirName, final String[] resources, final String[] targetNames)" ], "header": "@@ -112,19 +116,19 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " privCopyFiles(\"extin\", readOnly, readOnlyTargetFileNames);", " privCopyFiles(\"extinout\", readWrite, readWriteTargetFileNames);", " privCopyFiles(\"extout\", (String[]) null, (String[]) 
null);", " DropDatabaseSetup.removeDirectory(\"extin\");", " DropDatabaseSetup.removeDirectory(\"extinout\");", " DropDatabaseSetup.removeDirectory(\"extout\");", " private void privCopyFiles(final String dirName, final String[] resources, final String[] targetNames)" ] }, { "added": [ " private static void copyFiles(String dirName, String[] resources, String[] targetNames)" ], "header": "@@ -138,7 +142,7 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " private void copyFiles(String dirName, String[] resources, String[] targetNames)" ] }, { "added": [ " return getFile(EXTIN, name);" ], "header": "@@ -221,7 +225,7 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " return getFile(\"extin\", name);" ] }, { "added": [ " return getFile(EXTINOUT, name);" ], "header": "@@ -229,7 +233,7 @@ public class SupportFilesSetup extends TestSetup {", "removed": [ " return getFile(\"extinout\", name);" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [ "", " public final static String TEST_DBO = \"TEST_DBO\";" ], "header": "@@ -69,6 +69,8 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " final File dsh = new File(\"system\");", " AccessController.doPrivileged", " (new java.security.PrivilegedAction(){", " public Object run(){", " BaseTestCase.setSystemProperty(\"derby.system.home\",", " dsh.getAbsolutePath());", " return null;", " }", " }", " ); " ], "header": "@@ -138,10 +140,17 @@ public class TestConfiguration {", "removed": [ " File dsh = new File(\"system\");", " BaseTestCase.setSystemProperty(\"derby.system.home\",", " dsh.getAbsolutePath());" ] }, { "added": [ " new String[] {TEST_DBO,\"U1\",\"U2\",}," ], "header": "@@ -419,7 +428,7 @@ public class TestConfiguration {", "removed": [ " new String[] {\"TEST_DBO\",\"U1\",\"U2\",}," ] }, { "added": [ " TEST_DBO, \"dummy\"); // DRDA doesn't like empty pw" ], "header": "@@ -446,7 +455,7 @@ public class TestConfiguration {", "removed": [ " \"TEST_DBO\", \"dummy\"); // DRDA doesn't like empty pw" ] }, { "added": [ " TEST_DBO," ], "header": "@@ -482,7 +491,7 @@ public class TestConfiguration {", "removed": [ " \"TEST_DBO\"," ] }, { "added": [ " usersWithDBO[0] = TEST_DBO;" ], "header": "@@ -507,7 +516,7 @@ public class TestConfiguration {", "removed": [ " usersWithDBO[0] = \"TEST_DBO\";" ] } ] } ]
derby-DERBY-2466-dc095348
DERBY-2466: Add upgrade test for new policy-reloading procedure. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@523399 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2466-f35adc58
DERBY-2466: Attempt to fix NPE during run of SecurityPolicyReloadingTest on some environments. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@524252 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/SecurityManagerSetup.java", "hunks": [ { "added": [ "\t\t\tString newPolicyProperty = set.getProperty(\"java.security.policy\" );", "\t\t\tif ( newPolicyProperty == null ) { newPolicyProperty = \"\"; } ", " ", "\t\t\tString oldPolicyProperty = BaseTestCase.getSystemProperty(\"java.security.policy\");", "\t\t\tif ( oldPolicyProperty == null ) { oldPolicyProperty = \"\"; }", "", "\t\t\tif ( newPolicyProperty.equals( oldPolicyProperty ) ) { return; }" ], "header": "@@ -155,9 +155,13 @@ public final class SecurityManagerSetup extends TestSetup {", "removed": [ "\t\t\tif (set.getProperty(\"java.security.policy\").equals(", "\t\t\t\t\tBaseTestCase.getSystemProperty(\"java.security.policy\")))", "\t\t\t\t\treturn;" ] } ] } ]
derby-DERBY-2468-b3bfe123
DERBY-4067: ClientConnectionPoolDataSource.getPooledConnection and ClientXADataSource.getXAConnection ignore connection attributes DERBY-2468: would be nice if traceFile=filename connection attribute would be supported with ClientConnectionPoolDataSource and ClientXADataSource Made two changes: - moved the parsing of the connection attribute string to before the log writer is constructed, since the construction may depend on some attributes specified in the connection attribute string - added parsing of the connection attribute string to the client ConnectionPool and XA data sources Also re-enabled 'testClientMessageTextConnectionAttribute' and removed a work-around for the issue fixed by this commit. Patch file: derby-4067-1a-update_attrs.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@965793 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/jdbc/ClientConnectionPoolDataSource.java", "hunks": [ { "added": [ " updateDataSourceValues(", " tokenizeAttributes(getConnectionAttributes(), null));" ], "header": "@@ -74,6 +74,8 @@ public class ClientConnectionPoolDataSource extends ClientDataSource", "removed": [] } ] }, { "file": "java/client/org/apache/derby/jdbc/ClientDataSource.java", "hunks": [ { "added": [], "header": "@@ -25,12 +25,9 @@ import java.sql.Connection;", "removed": [ "import org.apache.derby.client.am.ClientMessageId;", "import org.apache.derby.client.net.NetConnection;", "import org.apache.derby.shared.common.error.ExceptionUtil;" ] }, { "added": [ " LogWriter dncLogWriter = null;", " try {", " updateDataSourceValues(", " tokenizeAttributes(getConnectionAttributes(), null));", " dncLogWriter = super.computeDncLogWriterForNewConnection(\"_sds\");", " return getConnectionX(dncLogWriter, getUser(), getPassword());", " } catch (SqlException se) {", " // The method below may throw an exception.", " handleConnectionException(dncLogWriter, se);", " // If the exception wasn't handled so far, re-throw it.", " throw se.getSQLException();", " }" ], "header": "@@ -162,7 +159,18 @@ public class ClientDataSource extends ClientBaseDataSource implements DataSource", "removed": [ " return getConnection(getUser(), getPassword());" ] }, { "added": [ " updateDataSourceValues(", " tokenizeAttributes(getConnectionAttributes(), null));", " return getConnectionX(dncLogWriter, user, password);" ], "header": "@@ -184,11 +192,10 @@ public class ClientDataSource extends ClientBaseDataSource implements DataSource", "removed": [ " updateDataSourceValues(tokenizeAttributes(getConnectionAttributes(), null));", " return ClientDriver.getFactory().newNetConnection", " ((NetLogWriter) dncLogWriter, user,", " password, this, -1, false);" ] } ] }, { "file": "java/client/org/apache/derby/jdbc/ClientXADataSource.java", "hunks": [ { "added": [], "header": "@@ -22,11 +22,9 @@", "removed": [ "import javax.sql.DataSource;", "import org.apache.derby.client.ClientXAConnection;" ] } ] } ]
derby-DERBY-247-6558c147
DERBY-247: Update demos to support Derby Network Client driver. Committed for Lance Andersen <Lance.Andersen@sun.com> git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@190803 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/demo/nserverdemo/NsSample.java", "hunks": [ { "added": [ " 2.\tloads the IBM DB2 JDBC Universal driver or derby client JDBC driver", " (default is the derby client JDBC driver)" ], "header": "@@ -40,7 +40,8 @@ import java.io.PrintWriter;", "removed": [ " 2.\tloads the IBM DB2 JDBC Universal driver" ] }, { "added": [ " public static final String DERBY_CLIENT_DRIVER = \"org.apache.derby.jdbc.ClientDriver\";" ], "header": "@@ -65,6 +66,7 @@ import java.io.PrintWriter;", "removed": [] }, { "added": [ " // URL for the Derby client JDBC driver.", "\tprivate static final String DERBY_CLIENT_URL= \"jdbc:derby://localhost:\"+NETWORKSERVER_PORT+\"/NSSampledb;create=true;\";", "", " // Default to using the Derby Client JDBC Driver for database connections", " String url = DERBY_CLIENT_URL;", " String jdbcDriver = DERBY_CLIENT_DRIVER;", " new nserverdemo.NsSample().startSample(args);", " }", "\tpublic void startSample(String[] args) throws Exception {", " ", " ", "", " // Determine which JDBC driver we are using with Derby", " parseArguments(args);", "", "\t\tpw.println(\"Using JDBC driver: \" + jdbcDriver);" ], "header": "@@ -80,19 +82,33 @@ public class NsSample {", "removed": [ "\t // DB2Connection provides additional functionality than java.sql.Connection", "\t // One can use either depending on the requirements" ] }, { "added": [ "\t\t// Load the JDBC Driver", "\t\t\tClass.forName(jdbcDriver).newInstance();", "\t\t\tpw.println(\"[NsSample] Unable to load the JDBC driver. Following exception was thrown\");" ], "header": "@@ -138,11 +154,11 @@ public class NsSample {", "removed": [ "\t\t// Load the JCC Driver", "\t\t\tClass.forName(DB2_JDBC_UNIVERSAL_DRIVER).newInstance();", "\t\t\tpw.println(\"[NsSample] Unable to load JCC driver. Following exception was thrown\");" ] }, { "added": [ "\t\t// Get database connection via DriverManager api", "\t\t\tconn = (Connection) DriverManager.getConnection(url, properties);" ], "header": "@@ -156,10 +172,10 @@ public class NsSample {", "removed": [ "\t\t// Get database connection using the JCC client via DriverManager api", "\t\t\tconn = (Connection) DriverManager.getConnection(CS_NS_DBURL, properties);" ] }, { "added": [ "\t\t\tclientThreads[i] = new NsSampleClientThread(i+1,url,properties,pw);" ], "header": "@@ -191,7 +207,7 @@ public class NsSample {", "removed": [ "\t\t\tclientThreads[i] = new NsSampleClientThread(i+1,CS_NS_DBURL,properties,pw);" ] } ] }, { "file": "java/demo/nserverdemo/SimpleNetworkClientSample.java", "hunks": [ { "added": [ " * 1) loads the DB2 Universal JDBC Driver or the Derby Network Client driver", " (default is the derby network client driver)" ], "header": "@@ -31,7 +31,8 @@ import java.io.InputStreamReader;", "removed": [ " * 1) loads the DB2 Universal JDBC Driver" ] }, { "added": [ "\t/**", "\t * Derby Network Client Driver class names", "\t */", "", "public static final String DERBY_CLIENT_DRIVER = \"org.apache.derby.jdbc.ClientDriver\";", "\tprivate static final String DERBY_CLIENT_DS = \"org.apache.derby.jdbc.ClientDataSource\";", "\t * This URL is for the DB2 JDBC Universal Driver", " // URL for the Derby client JDBC driver.", " private static final String DERBY_CLIENT_URL= \"jdbc:derby://localhost:\"+ NETWORKSERVER_PORT+\"/NSSampledb;create=true\";", " // Default to using the Derby Client JDBC Driver for database connections", " String url = DERBY_CLIENT_URL;", " String jdbcDriver = DERBY_CLIENT_DRIVER;", " String jdbcDataSource = DERBY_CLIENT_DS;", "", "\tpublic static void main (String[] args) throws 
Exception", " {", "", " new SimpleNetworkClientSample().startSample(args);", "", " }", "\tpublic void startSample (String[] args) throws Exception" ], "header": "@@ -62,17 +63,35 @@ public class SimpleNetworkClientSample", "removed": [ "\t * Also, this url describes the target database for type 4 connectivity", "\tpublic static void main (String[] args)", "\t\tthrows Exception" ] }, { "added": [ " // Determine which JDBC driver to use", " parseArguments(args);", "\t\t\t// load the appropriate JDBC Driver", "\t\t\tloadDriver();" ], "header": "@@ -82,10 +101,11 @@ public class SimpleNetworkClientSample", "removed": [ "\t\t\t// load DB2 JDBC UNIVERSAL DRIVER to enable client connections to", "\t\t\t// Derby Network Server", "\t\t\tloadJCCDriver();" ] }, { "added": [ "\tpublic Connection getClientDataSourceConn(javax.sql.DataSource ds)" ], "header": "@@ -128,7 +148,7 @@ public class SimpleNetworkClientSample", "removed": [ "\tpublic static Connection getClientDataSourceConn(javax.sql.DataSource ds)" ] }, { "added": [ "\tpublic javax.sql.DataSource getClientDataSource(String database, String user, String", "\t\tClass nsDataSource = Class.forName(jdbcDataSource);" ], "header": "@@ -147,10 +167,10 @@ public class SimpleNetworkClientSample", "removed": [ "\tpublic static javax.sql.DataSource getClientDataSource(String database, String user, String", "\t\tClass nsDataSource = Class.forName(DB2_JCC_DS);" ] }, { "added": [ " // The following is only applicable to the DB2 JDBC driver", " if(jdbcDataSource.equals( DB2_JCC_DS))", " {", "\t\t\t// driver type must be 4 to access Derby Network Server", "\t\t\tMethod drivertype = nsDataSource.getMethod(\"setDriverType\", methodParams);", "\t\t\targs = new Object[] {new Integer(4)};", "\t\t\tdrivertype.invoke(ds, args);", " }" ], "header": "@@ -180,10 +200,14 @@ public class SimpleNetworkClientSample", "removed": [ "\t\t// driver type must be 4 to access Derby Network Server", "\t\tMethod drivertype = nsDataSource.getMethod(\"setDriverType\", methodParams);", "\t\targs = new Object[] {new Integer(4)};", "\t\tdrivertype.invoke(ds, args);" ] }, { "added": [ "\t * Load the appropriate JDBC driver", "\tpublic void loadDriver()", "\t\t// Load the Driver", "\t\tClass.forName(jdbcDriver).newInstance();", "\t * @pre The JDBC driver must have been loaded before calling this method", "\tpublic Connection getClientDriverManagerConnection()" ], "header": "@@ -191,21 +215,21 @@ public class SimpleNetworkClientSample", "removed": [ "\t * Load DB2 JDBC UNIVERSAL DRIVER", "\tpublic static void loadJCCDriver()", "\t\t// Load the JCC Driver", "\t\tClass.forName(DB2_JDBC_UNIVERSAL_DRIVER).newInstance();", "\t * @pre DB2 JDBC Universal driver must have been loaded before calling this method", "\tpublic static Connection getClientDriverManagerConnection()" ] }, { "added": [ "\t\t// Get database connection via DriverManager api", "\t\tConnection conn = DriverManager.getConnection(url,properties); " ], "header": "@@ -217,8 +241,8 @@ public class SimpleNetworkClientSample", "removed": [ "\t\t// Get database connection using the JCC client via DriverManager api", "\t\tConnection conn = DriverManager.getConnection(CS_NS_DBURL,properties); " ] }, { "added": [ "\tpublic void test(Connection conn)" ], "header": "@@ -229,7 +253,7 @@ public class SimpleNetworkClientSample", "removed": [ "\tpublic static void test(Connection conn)" ] } ] }, { "file": "java/demo/simple/SimpleApp.java", "hunks": [ { "added": [ " * * derbyclient (will use the Net client driver to access Network Server)" ], 
"header": "@@ -44,6 +44,7 @@ import java.util.Properties;", "removed": [] }, { "added": [ " if (args[index].equalsIgnoreCase(\"derbyclient\"))", " {", " framework = \"derbyclient\";", " driver = \"org.apache.derby.jdbc.ClientDriver\";", " protocol = \"jdbc:derby://localhost:1527/\";", " }" ], "header": "@@ -239,6 +240,12 @@ public class SimpleApp", "removed": [] } ] } ]
derby-DERBY-2472-10951832
DERBY-2472 (partial) Use Throwable.initCause() to improve error reporting Let EmbedSQLException use initCause/getCause instead of getJavaException. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@537592 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedSQLException.java", "hunks": [ { "added": [], "header": "@@ -42,14 +42,6 @@ public class EmbedSQLException extends SQLException {", "removed": [ "\t/**", "\t\tJava exception that caused this exception, can be null.", "\t*/", " //Because it's transient, it doesn't get sent over to the client", " //side and hence the classes which needs to be included in the", " //client.jar file decreases 5 folds.", "\tprivate transient Throwable javaException;", "" ] } ] } ]
derby-DERBY-2472-1870e8fa
DERBY-2472 (partial) Use Throwable.initCause() to improve error reporting Fix some unsafe calls to initCause() by catching IllegalStateException. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@542446 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ "\t\t\t\tif (topLevelStandardException == null) {", "\t\t\t\t\t// always keep the first exception unchanged", "\t\t\t\t\ttopLevelStandardException = e;", "\t\t\t\t} else {", "\t\t\t\t\ttry {", "\t\t\t\t\t\t// Try to create a chain of exceptions. If successful,", "\t\t\t\t\t\t// the current exception is the top-level exception,", "\t\t\t\t\t\t// and the previous exception the cause of it.", "\t\t\t\t\t\te.initCause(topLevelStandardException);", "\t\t\t\t\t\ttopLevelStandardException = e;", "\t\t\t\t\t} catch (IllegalStateException ise) {", "\t\t\t\t\t\t// initCause() has already been called on e. We don't", "\t\t\t\t\t\t// expect this to happen, but if it happens, just skip", "\t\t\t\t\t\t// the current exception from the chain. This is safe", "\t\t\t\t\t\t// since we always keep the first exception.", "\t\t\t\t\t}", "\t\t\t\t}" ], "header": "@@ -613,8 +613,23 @@ public class GenericLanguageConnectionContext", "removed": [ "\t\t\t\te.initCause(topLevelStandardException);", "\t\t\t\ttopLevelStandardException = e;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/depend/BasicDependencyManager.java", "hunks": [ { "added": [ "\t\t\t\t\tif (noInvalidate == null) {", "\t\t\t\t\t\tnoInvalidate = sqle;", "\t\t\t\t\t} else {", "\t\t\t\t\t\ttry {", "\t\t\t\t\t\t\tsqle.initCause(noInvalidate);", "\t\t\t\t\t\t\tnoInvalidate = sqle;", "\t\t\t\t\t\t} catch (IllegalStateException ise) {", "\t\t\t\t\t\t\t// We weren't able to chain the exceptions. That's", "\t\t\t\t\t\t\t// OK, since we always have the first exception we", "\t\t\t\t\t\t\t// caught. Just skip the current exception.", "\t\t\t\t\t\t}", "\t\t\t\t\t}" ], "header": "@@ -382,10 +382,18 @@ public class BasicDependencyManager implements DependencyManager {", "removed": [ "\t\t\t\t\tif (noInvalidate != null)", "\t\t\t\t\t\tsqle.initCause(noInvalidate);", "", "\t\t\t\t\tnoInvalidate = sqle;" ] } ] } ]
derby-DERBY-2472-40888728
DERBY-2472 (partial) Use Throwable.initCause() to improve error reporting Chain exceptions from EmbedConnection.createDatabase() and EmbedConnection.bootDatabase() properly. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@537753 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java", "hunks": [ { "added": [ " throw Util.seeNextException(SQLState.CREATE_DATABASE_FAILED,", " new Object[] { dbname },", " handleException(mse));" ], "header": "@@ -1760,10 +1760,9 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "", "\t\t\tSQLException se = newSQLException(SQLState.CREATE_DATABASE_FAILED, dbname);", "\t\t\tse.setNextException(handleException(mse));", "\t\t\tthrow se;" ] }, { "added": [], "header": "@@ -1810,7 +1809,6 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t\tSQLException se = newSQLException(SQLState.BOOT_DATABASE_FAILED, dbname);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/Util.java", "hunks": [ { "added": [ " /**", " * Generate an <code>SQLException</code> which points to another", " * <code>SQLException</code> nested within it with", " * <code>setNextException()</code>.", " *", " * @param messageId message id", " * @param args the arguments to the message creation", " * @param next the next SQLException", " * @return an SQLException wrapping another SQLException", " */", " static SQLException seeNextException(String messageId, Object[] args,", " SQLException next) {", " return newEmbedSQLException(messageId, args, next,", " StandardException.getSeverityFromIdentifier(messageId), null);", " }", "" ], "header": "@@ -209,6 +209,22 @@ public abstract class Util {", "removed": [] } ] } ]
derby-DERBY-2472-de3b1087
DERBY-1440: jdk 1.6 client driver omits SQLStates and chained exceptions in error messages While working on DERBY-2472 I found out what caused this difference between the JDBC 3.0 driver and the JDBC 4.0 driver. There are three problems. Firstly, StandardException.unexpectedUserException() doesn't recognize that an SQLException is generated by Derby since it is not an EmbedSQLException. Secondly, TransactionResourceImpl.wrapInSQLException() invokes SQLException.setNextException() explicitly so that the required chaining/ferrying logic in the exception factory and in EmbedSQLException's constructor is not used. Thirdly, SQLException40.wrapArgsForTransportAcrossDRDA() puts a standard SQLException, not an EmbedSQLException, in the argument ferry's next-exception chain, which prevents the network server from seeing it as a Derby exception. The attached patch fixes the problem by 1) using SQLExceptionFactory.getArgumentFerry() to find out whether the exception is a Derby exception in unexpectedUserException(), 2) using factory methods instead of setNextException() to do the chaining in wrapInSQLException(), 3) improving Util.javaException() so that it sets up a next-exception chain if the Java exception contains nested exceptions, and 4) changing wrapArgsForTransportAcrossDRDA() to create an argument ferry whose next exception is the argument ferry of the main exception's next exception. This patch also fixes all the JUnit tests that contain code looking like this: assertStatementError(JDBC.vmSupportsJDBC4() ? "38000" : "42X62", cSt); Now, the check for JDBC level is not needed anymore for those tests. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@541435 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/error/StandardException.java", "hunks": [ { "added": [ "import org.apache.derby.impl.jdbc.Util;" ], "header": "@@ -24,6 +24,7 @@ package org.apache.derby.iapi.error;", "removed": [] }, { "added": [ " // If the exception is an SQLException generated by Derby, it has an", " // argument ferry which is an EmbedSQLException. Use this to check", " // whether the exception was generated by Derby.", " EmbedSQLException ferry = null;", " if (t instanceof SQLException) {", " SQLException sqle =", " Util.getExceptionFactory().getArgumentFerry((SQLException) t);", " if (sqle instanceof EmbedSQLException) {", " ferry = (EmbedSQLException) sqle;", " }", " }", "" ], "header": "@@ -436,6 +437,18 @@ public class StandardException extends Exception", "removed": [] }, { "added": [ "\t\tif ((t instanceof SQLException) && (ferry == null))" ], "header": "@@ -443,8 +456,7 @@ public class StandardException extends Exception", "removed": [ "\t\tif ((t instanceof SQLException) &&", "\t\t !(t instanceof EmbedSQLException)) " ] }, { "added": [ "\t\tif (ferry != null) {", "\t\t\tif (ferry.isSimpleWrapper()) {", "\t\t\t\tThrowable wrapped = ferry.getCause();" ], "header": "@@ -463,10 +475,9 @@ public class StandardException extends Exception", "removed": [ "\t\tif (t instanceof EmbedSQLException) {", "\t\t\tEmbedSQLException csqle = (EmbedSQLException) t;", "\t\t\tif (csqle.isSimpleWrapper()) {", "\t\t\t\tThrowable wrapped = csqle.getCause();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/TransactionResourceImpl.java", "hunks": [ { "added": [ "\t\t\treturn wrapInSQLException(thrownException);" ], "header": "@@ -342,7 +342,7 @@ public final class TransactionResourceImpl", "removed": [ "\t\t\treturn wrapInSQLException((SQLException) null, thrownException);" ] }, { "added": [ "\t\t\tthrow wrapInSQLException(t);", "\tpublic static SQLException wrapInSQLException(Throwable thrownException) {", "\t\t\treturn null;" ], "header": "@@ -355,16 +355,16 @@ public final class TransactionResourceImpl", "removed": [ "\t\t\tthrow wrapInSQLException((SQLException) null, t);", "\tpublic static final SQLException wrapInSQLException(SQLException sqlException, Throwable thrownException) {", "\t\t\treturn sqlException;" ] } ] } ]
derby-DERBY-2472-e70c716c
DERBY-2472 (partial) Use Throwable.initCause() to improve error reporting Replace StandardException's implementation of exception chaining with initCause/getCause. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@537412 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/error/StandardException.java", "hunks": [ { "added": [], "header": "@@ -57,7 +57,6 @@ public class StandardException extends Exception", "removed": [ "\tprivate Throwable nestedException;" ] }, { "added": [ "\t\tif (t != null) {", "\t\t\tinitCause(t);", "\t\t}" ], "header": "@@ -85,8 +84,10 @@ public class StandardException extends Exception", "removed": [ "\t\tthis.nestedException = t;" ] }, { "added": [], "header": "@@ -128,23 +129,6 @@ public class StandardException extends Exception", "removed": [ "\t/**", "\t * Sets the nested exception for this exception.", "\t */", "\tpublic final void setNestedException(Throwable nestedException)", "\t{", "\t\tthis.nestedException = nestedException;", "\t}", "", "\t/**", "\t * Returns the nested exception for this exception,", "\t * if there is one.", "\t */", "\tpublic final Throwable getNestedException()", "\t{", "\t\treturn nestedException;", "\t}", "" ] }, { "added": [ " se.initCause(t);" ], "header": "@@ -446,7 +430,7 @@ public class StandardException extends Exception", "removed": [ " se.nestedException = t;" ] }, { "added": [ "\t\t\t\t\tse.initCause(sqlex.getNextException());" ], "header": "@@ -472,7 +456,7 @@ public class StandardException extends Exception", "removed": [ "\t\t\t\t\tse.setNestedException(sqlex.getNextException());" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/services/context/ErrorStringBuilder.java", "hunks": [ { "added": [], "header": "@@ -21,7 +21,6 @@", "removed": [ "import org.apache.derby.iapi.error.StandardException;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/monitor/BaseMonitor.java", "hunks": [ { "added": [ "\t\t\tStandardException se;", "\t\t\t\tse = (StandardException) t;", "\t\t\t\tse = Monitor.exceptionStartingModule(t);", "\t\t\t\tcm.cleanupOnError(se);" ], "header": "@@ -1840,14 +1840,15 @@ nextModule:", "removed": [ "\t\t\t\t;", "\t\t\t\tt = Monitor.exceptionStartingModule(t);", "\t\t\t\tcm.cleanupOnError(t);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ "\t\t\t\te.initCause(topLevelStandardException);" ], "header": "@@ -613,7 +613,7 @@ public class GenericLanguageConnectionContext", "removed": [ "\t\t\t\te.setNestedException(topLevelStandardException);" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/unitTests/harness/T_Generic.java", "hunks": [ { "added": [ "\t\t\tFAIL(t.toString());", "\t\t\tt.printStackTrace(out.getPrintWriter());" ], "header": "@@ -120,16 +120,8 @@ public abstract class T_Generic implements UnitTest, ModuleControl", "removed": [ "\t\t\t", "\t\t\twhile (t != null) {", "\t\t\t\tFAIL(t.toString());", "\t\t\t\tt.printStackTrace(out.getPrintWriter());", "\t\t\t\tif (t instanceof StandardException) {", "\t\t\t\t\tt = ((StandardException) t).getNestedException();", "\t\t\t\t\tcontinue;", "\t\t\t\t}", "\t\t\t\tbreak;", "\t\t\t}" ] } ] } ]
derby-DERBY-2485-f06ca0f0
DERBY-2485 Example code for a simple transaction listener scheme. Not yet used or compiled by the build.xml files. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@522656 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/transaction/TransactionListener.java", "hunks": [ { "added": [ "/*", "", " Derby - Class org.apache.derby.iapi.transaction.TransactionListener", "", " Licensed to the Apache Software Foundation (ASF) under one or more", " contributor license agreements. See the NOTICE file distributed with", " this work for additional information regarding copyright ownership.", " The ASF licenses this file to You under the Apache License, Version 2.0", " (the \"License\"); you may not use this file except in compliance with", " the License. You may obtain a copy of the License at", "", " http://www.apache.org/licenses/LICENSE-2.0", "", " Unless required by applicable law or agreed to in writing, software", " distributed under the License is distributed on an \"AS IS\" BASIS,", " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", " See the License for the specific language governing permissions and", " limitations under the License.", "", " */", "package org.apache.derby.iapi.transaction;", "", "import org.apache.derby.iapi.error.StandardException;", "", "/**", " * An interface that must be implemented by a object that", " * wants to be notified when a significant transaction event occurs.", " */", "public interface TransactionListener {", " ", " /**", " * Notifies registered listener that the transaction", " * is about to commit. Called before the commit is", " * recorded and flushed to the transaction log device.", " * ", " * @return true to remove this listener once this", " * method returns.", " * ", " * @throws StandardException If thrown the commit attempt", " * will be stopped and instead the transaction will be rolled back.", " */", " boolean preCommit() throws StandardException;", " ", " /**", " * Notifies registered listener that the transaction", " * is about to rollback. Called before any physical rollback.", " * The listener will be removed from the current transaction", " * once the method returns.", " * ", " * @throws StandardException If thrown the rollback attempt", " * will be stopped and instead the database will be shut down.", " * ", " * TODO: Define behaviour on exception during rollback.", " */", " void preRollback() throws StandardException;", "", " // to support statement/savepoint rollback.", " // void preSavepointRollback() throws StandardException;", "", "}" ], "header": "@@ -0,0 +1,60 @@", "removed": [] } ] } ]
derby-DERBY-2486-84178905
DERBY-2486 Rename SortController.close to completedInserts, which better matches its current use and its future use once SortController is used to completely control a sort. Also makes some minor changes to the access protection of methods in the sort implementation. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@522280 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/SortController.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.DataValueDescriptor;" ], "header": "@@ -21,11 +21,8 @@", "removed": [ "import org.apache.derby.iapi.types.CloneableObject;", "", "import org.apache.derby.iapi.types.DataValueDescriptor;", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/AlterTableConstantAction.java", "hunks": [ { "added": [ "\t\t\tsorters[index].completedInserts();" ], "header": "@@ -1531,7 +1531,7 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t\t\tsorters[index].close();" ] }, { "added": [ "\tprivate void\tcleanUp() throws StandardException" ], "header": "@@ -1987,11 +1987,10 @@ class AlterTableConstantAction extends DDLSingleTableConstantAction", "removed": [ "\t * @see ResultSet#cleanUp", "\tpublic void\tcleanUp() throws StandardException" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/GroupedAggregateResultSet.java", "hunks": [ { "added": [ "\t\t\tsorter.completedInserts();" ], "header": "@@ -270,7 +270,7 @@ class GroupedAggregateResultSet extends GenericAggregateResultSet", "removed": [ "\t\t\tsorter.close();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/InsertResultSet.java", "hunks": [ { "added": [ "\t\t\tsorters[index].completedInserts();" ], "header": "@@ -1805,7 +1805,7 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet", "removed": [ "\t\t\tsorters[index].close();" ] }, { "added": [ "\t\t\t\t\tsorters[index].completedInserts();" ], "header": "@@ -1923,7 +1923,7 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet", "removed": [ "\t\t\t\t\tsorters[index].close();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/sort/MergeInserter.java", "hunks": [ { "added": [ "final class MergeInserter implements SortController", "\tprivate MergeSort sort;", "\tprivate TransactionManager tran;", "\tprivate Vector mergeRuns;", "\tprivate SortBuffer sortBuffer;" ], "header": "@@ -35,28 +35,28 @@ import org.apache.derby.iapi.types.DataValueDescriptor;", "removed": [ "public final class MergeInserter implements SortController", "\tprotected MergeSort sort = null;", "\tprotected TransactionManager tran;", "\tVector mergeRuns = null;", "\tSortBuffer sortBuffer = null;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/sort/MergeSort.java", "hunks": [ { "added": [ "final class MergeSort implements Sort" ], "header": "@@ -58,7 +58,7 @@ import java.util.Vector;", "removed": [ "public final class MergeSort implements Sort" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/sort/SortBuffer.java", "hunks": [ { "added": [ "\tvoid setNextAux(int aux)" ], "header": "@@ -112,7 +112,7 @@ class SortBuffer", "removed": [ "\tpublic void setNextAux(int aux)" ] }, { "added": [ "\tint getLastAux()" ], "header": "@@ -121,7 +121,7 @@ class SortBuffer", "removed": [ "\tpublic int getLastAux()" ] }, { "added": [ "\tSortBuffer(MergeSort sort)" ], "header": "@@ -130,7 +130,7 @@ class SortBuffer", "removed": [ "\tpublic SortBuffer(MergeSort sort)" ] }, { "added": [ "\tboolean init()" ], "header": "@@ -139,7 +139,7 @@ class SortBuffer", "removed": [ "\tpublic boolean init()" ] }, { "added": [ "\tvoid reset()", "\tvoid close()" ], "header": "@@ -159,14 +159,14 @@ class SortBuffer", "removed": [ "\tpublic void reset()", "\tpublic void close()" ] }, { "added": [ "\tvoid grow(int percent)" ], "header": "@@ -178,7 +178,7 
@@ class SortBuffer", "removed": [ "\tpublic void grow(int percent)" ] }, { "added": [ "\tint capacity()" ], "header": "@@ -190,7 +190,7 @@ class SortBuffer", "removed": [ "\tpublic int capacity()" ] }, { "added": [ "\tint insert(DataValueDescriptor[] k)" ], "header": "@@ -204,7 +204,7 @@ class SortBuffer", "removed": [ "\tpublic int insert(DataValueDescriptor[] k)" ] }, { "added": [ "\tDataValueDescriptor[] removeFirst()" ], "header": "@@ -415,7 +415,7 @@ class SortBuffer", "removed": [ "\tpublic DataValueDescriptor[] removeFirst()" ] } ] } ]
derby-DERBY-2487-48a98847
DERBY-2487: Enhance Derby with XPLAIN functionality. This feature was contributed by Felix Beyer (beyer dot felix at gmx dot net). This feature introduces an alternate handling of runtime statistics information. Derby can now be configured so that it will capture the statistics in a machine-readable form and will store them into a set of simply-structured tables in a schema which is specified by the user. We call this behavior "XPLAIN style", and we call the tables which are used the "XPLAIN tables". Having captured statistics about statement execution, you can then analyze the statement behavior by querying these tables. For example, you can determine how much time was taken, what resources were used, what query plan was chosen, and so on. This is the initial commit of this new functionality. Subsequent changes will provide documentation, additional tests, fully-tested upgrade support. As the community gains more experience with this feature, we will find more enhancements to make, but the initial commit provides a working feature with an initial set of basic regression tests. Many thanks to the community for reviewing the changes through several iterations of development. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@768597 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/SystemProcedures.java", "hunks": [ { "added": [ "import java.sql.Statement;" ], "header": "@@ -26,6 +26,7 @@ import java.security.PrivilegedAction;", "removed": [] }, { "added": [ "import org.apache.derby.impl.sql.catalog.XPLAINTableDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINScanPropsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINSortPropsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINStatementDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINStatementTimingsDescriptor;", "import org.apache.derby.iapi.sql.dictionary.CatalogRowFactory;", "import org.apache.derby.iapi.sql.dictionary.SystemColumn;", "import org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator;" ], "header": "@@ -50,10 +51,20 @@ import org.apache.derby.impl.jdbc.EmbedDatabaseMetaData;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/sql/execute/ExecutionFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINFactoryIF;" ], "header": "@@ -32,6 +32,7 @@ import org.apache.derby.iapi.sql.ResultDescription;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/sql/execute/RunTimeStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "" ], "header": "@@ -26,6 +26,8 @@ import java.io.Serializable;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DD_Version.java", "hunks": [ { "added": [ "\t\tcase DataDictionary.DD_VERSION_DERBY_10_6:", "\t\t\treturn \"10.6\";" ], "header": "@@ -154,6 +154,8 @@ public\tclass DD_Version implements\tFormatable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ " // add 10.6 specific system procedures", " create_10_6_system_procedures(tc, newlyCreatedRoutines );" ], "header": "@@ -10190,6 +10190,8 @@ public final class\tDataDictionaryImpl", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ "\t/**", "\t * If xplainOnlyMode is set (via SYSCS_SET_XPLAIN_MODE), then the", "\t * connection does not actually execute statements, but only", "\t * compiles them, and emits the query plan information into the", "\t * XPLAIN tables.", "\t */", "\tprivate boolean xplainOnlyMode = false;", " ", "\t/** the current xplain schema. Is usually NULL. 
Can be set via", "\t * SYSCS_SET_XPLAIN_SCHEMA, in which case it species the schema into", "\t * which XPLAIN information should be stored in user tables.", "\t */", "\tprivate String xplain_schema = null;", "\t/**", "\t * For each XPLAIN table, this map stores a SQL INSERT statement which", "\t * can be prepared and used to insert rows into the table during the", "\t * capturing of statistics data into the user XPLAIN tables.", "\t */", "\tprivate Map xplain_statements = new HashMap();", "\t" ], "header": "@@ -117,6 +117,26 @@ public class GenericLanguageConnectionContext", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/BasicNoPutResultSetImpl.java", "hunks": [ { "added": [ "\t\t\tLanguageConnectionContext lcc = getLanguageConnectionContext();", "\t\t\tif(lcc.getRunTimeStatisticsMode() && lcc.getXplainOnlyMode()) {", "\t\t\t\t// do nothing", "\t\t\t} else {", "\t\t\t\topenCore();", "\t\t\t}" ], "header": "@@ -241,8 +241,12 @@ implements NoPutResultSet", "removed": [ "", "\t\t\topenCore();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/GenericExecutionFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINFactoryIF;" ], "header": "@@ -40,6 +40,7 @@ import org.apache.derby.iapi.sql.execute.ExecutionContext;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/NoPutResultSetImpl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.ExecutionFactory;", "import org.apache.derby.iapi.sql.execute.ResultSetStatisticsFactory;", "import org.apache.derby.iapi.sql.execute.RunTimeStatistics;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -33,6 +33,10 @@ import org.apache.derby.iapi.sql.ResultDescription;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/NoRowsResultSetImpl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.RunTimeStatistics;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -43,6 +43,8 @@ import org.apache.derby.iapi.sql.dictionary.TableDescriptor;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealAnyResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,11 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealBasicNoPutResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "" ], "header": "@@ -21,6 +21,11 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealCurrentOfStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;" ], "header": "@@ -24,6 +24,8 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] } ] }, { "file": 
"java/engine/org/apache/derby/impl/sql/execute/rts/RealDeleteCascadeResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;" ], "header": "@@ -23,6 +23,8 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealDeleteResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;" ], "header": "@@ -22,6 +22,11 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealDeleteVTIResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;" ], "header": "@@ -22,6 +22,11 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealDistinctScalarAggregateStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -22,6 +22,8 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealDistinctScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -24,6 +24,9 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealGroupedAggregateStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINSortPropsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -21,7 +21,14 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealHashJoinStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;" ], "header": "@@ -23,6 +23,7 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealHashLeftOuterJoinStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "" ], "header": "@@ -21,6 +21,11 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealHashScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import 
org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINScanPropsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -29,6 +29,12 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealHashTableStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINScanPropsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -28,6 +28,10 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealIndexRowToBaseRowStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "" ], "header": "@@ -33,6 +33,9 @@ import java.io.ObjectOutput;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealInsertResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,11 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealInsertVTIResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,11 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealJoinResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "" ], "header": "@@ -21,6 +21,11 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealLastIndexKeyScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINScanPropsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -28,6 +28,12 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealMaterializedResultSetStatistics.java", "hunks": [ { "added": [ "import 
org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "", "" ], "header": "@@ -21,6 +21,11 @@", "removed": [] }, { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "" ], "header": "@@ -32,6 +37,8 @@ import java.io.ObjectOutput;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealNestedLoopJoinStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;" ], "header": "@@ -27,6 +27,8 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] }, { "added": [ "\tpublic String resultSetName;" ], "header": "@@ -50,7 +52,7 @@ public class RealNestedLoopJoinStatistics", "removed": [ "\tprotected String resultSetName;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealNestedLoopLeftOuterJoinStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "" ], "header": "@@ -21,6 +21,11 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealNoRowsResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "" ], "header": "@@ -21,6 +21,11 @@", "removed": [] }, { "added": [ "\tpublic long executeTime;" ], "header": "@@ -48,7 +53,7 @@ abstract class RealNoRowsResultSetStatistics", "removed": [ "\tprotected long executeTime;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealNormalizeResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,8 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealOnceResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,8 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealProjectRestrictStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -21,12 +21,17 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealRowCountStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -23,6 +23,8 @@ package org.apache.derby.impl.sql.execute.rts;", "removed": [] } ] }, { "file": 
"java/engine/org/apache/derby/impl/sql/execute/rts/RealRowResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "" ], "header": "@@ -32,6 +32,12 @@ import java.io.ObjectOutput;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealScalarAggregateStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,11 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealScrollInsensitiveResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,8 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealSetOpResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -22,6 +22,11 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealSortStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINSortPropsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "" ], "header": "@@ -21,6 +21,12 @@", "removed": [] }, { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -28,6 +34,7 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealTableScanStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINScanPropsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -29,6 +29,12 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealUnionResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import 
org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -31,6 +31,11 @@ import org.apache.derby.iapi.services.io.FormatableHashtable;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealUpdateResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.UUID;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetDescriptor;", "import org.apache.derby.impl.sql.catalog.XPLAINResultSetTimingsDescriptor;", "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,11 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealVTIStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -27,6 +27,8 @@ import org.apache.derby.iapi.services.i18n.MessageService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RealWindowResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.execute.xplain.XPLAINUtil;", "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;" ], "header": "@@ -22,6 +22,8 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/ResultSetStatistics.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINable;", "" ], "header": "@@ -21,6 +21,8 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/rts/RunTimeStatisticsImpl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.execute.xplain.XPLAINVisitor;", "" ], "header": "@@ -30,6 +30,8 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] } ] } ]
derby-DERBY-2487-d54fabde
DERBY-2487: Enhance Derby with XPLAIN functionality. Updated list of expected functions in TestDbMetaData to contain the XPLAIN functions. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@768671 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2488-8fcae923
DERBY-2488: Patch to ensure that JDBC 4 Pooled and XA connections are returned from JDBC 3 DataSources if the app is running with Java SE 6. In particular: 1. Adds two methods, "getNewPooledConnection()" and "getNewXAConnection()", to the Driver30 class and makes those methods return JDBC 3 objects. 2. Overrides the two methods from #1 in Driver40 to return JDBC 4 objects. 3. Changes the "createPooledConnection()" and "createXAConnection()" methods in EmbeddedConnectionPoolDataSource and EmbeddedXADataSource (respectively) to call the new methods on the DriverXX object returned from the existing "findDriver()" method. 4. Removes the now unused "createPooledConnection()" method from EmbedConnectionPoolDataSource40, and removes "createXAConnection()" from EmbedXADataSource40. 5. Changes ClientXADataSource, which had a problem similar to the Embedded data sources, to match the behavior of ClientConnectionPoolDataSource so that the client now correctly returns JDBC 4 objects for Java SE 6 apps. 6. Includes a new test, jdbc4/JDBC4FromJDBC3DataSourceTest, which verifies that JDBC 4 connections will be returned from JDBC 3 data sources if the JDK in use is JDK 1.6. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@531129 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ClientJDBCObjectFactory.java", "hunks": [ { "added": [ "import org.apache.derby.client.ClientXAConnection;" ], "header": "@@ -22,6 +22,7 @@", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/ClientJDBCObjectFactoryImpl.java", "hunks": [ { "added": [ "import org.apache.derby.client.ClientXAConnection;" ], "header": "@@ -25,6 +25,7 @@ import java.rmi.UnexpectedException;", "removed": [] }, { "added": [ "import org.apache.derby.client.net.NetLogWriter;", "import org.apache.derby.jdbc.ClientXADataSource;" ], "header": "@@ -37,7 +38,9 @@ import org.apache.derby.client.am.Section;", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/ClientJDBCObjectFactoryImpl40.java", "hunks": [ { "added": [ "import org.apache.derby.client.ClientXAConnection;", "import org.apache.derby.client.ClientXAConnection40;" ], "header": "@@ -23,6 +23,8 @@ package org.apache.derby.client.net;", "removed": [] }, { "added": [ "import org.apache.derby.client.net.NetLogWriter;", "import org.apache.derby.jdbc.ClientXADataSource;", "", "import java.sql.SQLException;" ], "header": "@@ -42,9 +44,12 @@ import org.apache.derby.client.am.Statement;", "removed": [ "import java.sql.SQLException;" ] } ] }, { "file": "java/client/org/apache/derby/jdbc/ClientXADataSource.java", "hunks": [ { "added": [ "import org.apache.derby.client.am.LogWriter;" ], "header": "@@ -28,6 +28,7 @@ import javax.sql.XADataSource;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/jdbc/Driver30.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.jdbc.ResourceAdapter;", "", "/** -- jdbc 2.0. extension -- */", "import javax.sql.PooledConnection;", "import javax.sql.XAConnection;", "" ], "header": "@@ -30,10 +30,16 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/jdbc/Driver40.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.jdbc.ResourceAdapter;" ], "header": "@@ -39,6 +39,7 @@ import org.apache.derby.impl.jdbc.EmbedDatabaseMetaData40;", "removed": [] }, { "added": [ "/** -- jdbc 2.0. extension -- */", "import javax.sql.PooledConnection;", "import javax.sql.XAConnection;" ], "header": "@@ -47,6 +48,9 @@ import java.sql.PreparedStatement;", "removed": [] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/JDBCDataSource.java", "hunks": [ { "added": [ " /**", " * Return a ConnectionPoolDataSource corresponding to the current", " * configuration. 
This method returns a generic Object (as opposed", " * to a ConnectionPoolDataSource) because this class has to work", " * with JSR169 JVMs, as well, and those JVMs do not include the", " * javax.sql.ConnectionPoolDataSource class.", " */", " public static Object getConnectionPoolDataSource()", " {", " TestConfiguration config = TestConfiguration.getCurrent();", " return getDataSource(config, (HashMap) null,", "\t\t\tconfig.getJDBCClient().getConnectionPoolDataSourceClassName());", " }", " ", " /*", " * Return an XADataSource corresponding to the current configuration.", " * This method returns a generic Object (as opposed to an XADataSource)", " * because this class has to work with JSR169 JVMs, as well, and those", " * JVMs do not include the javax.sql.XADataSource class.", " */", " public static Object getXADataSource()", " {", " TestConfiguration config = TestConfiguration.getCurrent();", " return getDataSource(config, (HashMap) null,", " config.getJDBCClient().getXADataSourceClassName());", " }", " " ], "header": "@@ -43,6 +43,33 @@ public class JDBCDataSource {", "removed": [] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/ServerSetup.java", "hunks": [ { "added": [ " private JDBCClient client;" ], "header": "@@ -32,6 +32,7 @@ public final class ServerSetup extends ChangeConfigurationSetup {", "removed": [] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [ "", " /**", " * Return a Test suite that contains all the test fixtures", " * for the passed in class running in embedded and client-", " * server *JDBC3* configurations.", " * <BR>", " * Each set of embedded and set of client server tests is", " * decorated with a CleanDatabaseTestSetup.", " * <BR>", " */", " public static Test forceJDBC3Suite(Class testClass)", " {", " final TestSuite suite = new TestSuite(suiteName(testClass));", "", " suite.addTest(", " new CleanDatabaseTestSetup(", " forceJDBC3Embedded(embeddedSuite(testClass))));", "", " suite.addTest(", " new CleanDatabaseTestSetup(", " forceJDBC3NetClient(clientServerSuite(testClass))));", "", " return (suite);", " }", "" ], "header": "@@ -227,7 +227,31 @@ public class TestConfiguration {", "removed": [ " " ] }, { "added": [ " /**", " * Returns a decorator that forces the JDBC 3 network client in", " * a Java SE 6/JDBC 4 environment. The only difference is that", " * the DataSource class names will be the \"old\" JDBC 3 versions", " * and not the JDBC 4 specific ones.", " *", " * Assumption is that the received Test is an instance of ServerSetup,", " * which is the decorator for client server tests. If that is not", " * the case then this method is a no-op.", " *", " * @param test Test around which to wrap the JDBC 3 network client", " * configuration.", " */", " public static Test forceJDBC3NetClient(Test test)", " {", " if (JDBC.vmSupportsJDBC4() && (test instanceof ServerSetup))", " ((ServerSetup)test).setJDBCClient(JDBCClient.DERBYNETCLIENT_30);", " return test;", " }", " " ], "header": "@@ -570,6 +594,26 @@ public class TestConfiguration {", "removed": [] } ] } ]
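Editor's note: a compact way to see the behaviour described in item 6 of the commit message is to ask a JDBC 3 embedded data source for a pooled connection on Java SE 6 and check which JDBC level the returned connection reports. The database name and the use of getJDBCMajorVersion() as the probe are illustrative choices, not part of the committed test.

import java.sql.Connection;
import javax.sql.PooledConnection;
import org.apache.derby.jdbc.EmbeddedConnectionPoolDataSource;

public class Jdbc4FromJdbc3Sketch {
    public static void main(String[] args) throws Exception {
        EmbeddedConnectionPoolDataSource ds = new EmbeddedConnectionPoolDataSource();
        ds.setDatabaseName("pooldb");      // assumed database name
        ds.setCreateDatabase("create");

        PooledConnection pc = ds.getPooledConnection();
        Connection conn = pc.getConnection();
        // On a Java SE 6 runtime the JDBC 3 data source should now hand back
        // a JDBC 4 connection, so this is expected to print 4 rather than 3.
        System.out.println("JDBC major version: "
                + conn.getMetaData().getJDBCMajorVersion());
        conn.close();
        pc.close();
    }
}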
derby-DERBY-2493-0b3c2c9d
DERBY-2493 (partial) Use unsynchronized collections in BackingStoreHashtable Replace Hashtable with HashMap. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@527402 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java", "hunks": [ { "added": [ "import java.util.Collections;", "import java.util.HashMap;", "import java.util.Iterator;" ], "header": "@@ -32,8 +32,10 @@ import org.apache.derby.iapi.types.DataValueDescriptor;", "removed": [ "import java.util.Hashtable;" ] }, { "added": [ " private HashMap hash_table;" ], "header": "@@ -109,7 +111,7 @@ public class BackingStoreHashtable", "removed": [ " private Hashtable hash_table;" ] }, { "added": [ " * inserted into a java.util.HashMap. In this case no" ], "header": "@@ -145,7 +147,7 @@ public class BackingStoreHashtable", "removed": [ " * inserted into a java.util.Hashtable. In this case no " ] }, { "added": [ " * scan result row to be the key to the HashMap.", " * @param remove_duplicates Should the HashMap automatically remove" ], "header": "@@ -158,12 +160,12 @@ public class BackingStoreHashtable", "removed": [ " * scan result row to be the key to the Hashtable.", " * @param remove_duplicates Should the Hashtable automatically remove" ] }, { "added": [ " * @param initialCapacity If not \"-1\" used to initialize the java HashMap", " * @param loadFactor If not \"-1\" used to initialize the java HashMap" ], "header": "@@ -175,11 +177,9 @@ public class BackingStoreHashtable", "removed": [ " * @param initialCapacity If not \"-1\" used to initialize the java ", " * Hashtable.", " * @param loadFactor If not \"-1\" used to initialize the java ", " * Hashtable." ] }, { "added": [ " new HashMap(initialCapacity) :", " new HashMap(initialCapacity, loadFactor));" ], "header": "@@ -222,8 +222,8 @@ public class BackingStoreHashtable", "removed": [ " new Hashtable(initialCapacity) : ", " new Hashtable(initialCapacity, loadFactor));" ] }, { "added": [ " * create a hash table of size estimated_rowcnt can cause", " * OutOfMemory errors when we try to create the hash table.", " * it's very likely that creating a hash table with a capacity", " * for the hash table might be.", " new HashMap() :", " new HashMap((int) estimated_rowcnt) :" ], "header": "@@ -237,26 +237,26 @@ public class BackingStoreHashtable", "removed": [ " * create a Hashtable of size estimated_rowcnt can cause", " * OutOfMemory errors when we try to create the Hashtable.", " * it's very likely that creating a Hashtable with a capacity", " * for the Hashtable might be.", " new Hashtable() :", " new Hashtable((int) estimated_rowcnt) :" ] }, { "added": [ " // because a hash table with capacity estimated_rowcnt would" ], "header": "@@ -267,7 +267,7 @@ public class BackingStoreHashtable", "removed": [ " // because a Hashtable with capacity estimated_rowcnt would" ] }, { "added": [ " // capacity of the hash table.", " hash_table =", " new HashMap((int)(max_inmemory_size / rowUsage));", " add_row_to_hash_table(row, needsToClone);" ], "header": "@@ -278,12 +278,13 @@ public class BackingStoreHashtable", "removed": [ " // capacity of the Hashtable.", " hash_table = new Hashtable((int)(max_inmemory_size / rowUsage));", " add_row_to_hash_table(hash_table, row, needsToClone);" ] }, { "added": [ " hash_table = new HashMap();" ], "header": "@@ -295,7 +296,7 @@ public class BackingStoreHashtable", "removed": [ " hash_table = new Hashtable();" ] }, { "added": [ " private void add_row_to_hash_table(Object[] row, boolean needsToClone)", " if (spillToDisk(row))" ], "header": "@@ -401,18 +402,14 @@ public class BackingStoreHashtable", "removed": [ " * @param hash_table The java HashTable to load into.", " private void 
add_row_to_hash_table(", " Hashtable hash_table,", " Object[] row,", " boolean needsToClone )", " if( spillToDisk( hash_table, row))" ] }, { "added": [ " private boolean spillToDisk(Object[] row) throws StandardException {" ], "header": "@@ -474,17 +471,13 @@ public class BackingStoreHashtable", "removed": [ " * @param hash_table The in-memory hash table", " private boolean spillToDisk( Hashtable hash_table,", " Object[] row)", "\t\tthrows StandardException", " {" ] }, { "added": [ " return Collections.enumeration(hash_table.values());" ], "header": "@@ -599,7 +592,7 @@ public class BackingStoreHashtable", "removed": [ " return(hash_table.elements());" ] }, { "added": [ " * hash table.", " * of the hash table entry is a row or is a Vector of rows." ], "header": "@@ -613,14 +606,14 @@ public class BackingStoreHashtable", "removed": [ " * Hashtable.", " * of the Hashtable entry is a row or is a Vector of rows." ] }, { "added": [ " add_row_to_hash_table(row, needsToClone);" ], "header": "@@ -758,7 +751,7 @@ public class BackingStoreHashtable", "removed": [ " add_row_to_hash_table(hash_table, row, needsToClone);" ] }, { "added": [ " private Iterator memoryIterator;", " memoryIterator = hash_table.values().iterator();" ], "header": "@@ -781,12 +774,12 @@ public class BackingStoreHashtable", "removed": [ " private Enumeration memoryEnumeration;", " memoryEnumeration = hash_table.elements();" ] }, { "added": [ " if (memoryIterator != null) {", " if (memoryIterator.hasNext()) {", " }", " memoryIterator = null;" ], "header": "@@ -802,11 +795,11 @@ public class BackingStoreHashtable", "removed": [ " if( memoryEnumeration != null)", " {", " if( memoryEnumeration.hasMoreElements())", " memoryEnumeration = null;" ] }, { "added": [ " if (memoryIterator != null) {", " if (memoryIterator.hasNext()) {", " return memoryIterator.next();", " }", " memoryIterator = null;" ], "header": "@@ -815,11 +808,11 @@ public class BackingStoreHashtable", "removed": [ " if( memoryEnumeration != null)", " {", " if( memoryEnumeration.hasMoreElements())", " return memoryEnumeration.nextElement();", " memoryEnumeration = null;" ] } ] } ]
derby-DERBY-2493-1523f049
DERBY-2493 (partial) Use unsynchronized collections in BackingStoreHashtable Updated some tests so that they are not sensitive to the order of the rows in the returned ResultSet. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@523621 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/JDBC.java", "hunks": [ { "added": [ " /**", " * Assert that the ResultSet contains the same rows as the specified", " * two-dimensional array. The order of the results is ignored. Convert the", " * results to trimmed strings before comparing. The ResultSet object will", " * be closed.", " *", " * @param rs the ResultSet to check", " * @param expectedRows the expected rows", " */", " public static void assertUnorderedResultSet(", " ResultSet rs, String[][] expectedRows) throws SQLException {", " assertUnorderedResultSet(rs, expectedRows, true);", " }", "", " /**", " * Assert that the ResultSet contains the same rows as the specified", " * two-dimensional array. The order of the results is ignored. Objects are", " * read out of the ResultSet with the <code>getObject()</code> method and", " * compared with <code>equals()</code>. If the", " * <code>asTrimmedStrings</code> is <code>true</code>, the objects are read", " * with <code>getString()</code> and trimmed before they are compared. The", " * ResultSet object will be closed when this method returns.", " *", " * @param rs the ResultSet to check", " * @param expectedRows the expected rows", " * @param asTrimmedStrings whether the object should be compared as trimmed", " * strings", " */", " public static void assertUnorderedResultSet(", " ResultSet rs, Object[][] expectedRows, boolean asTrimmedStrings)", " throws SQLException {", "", " if (expectedRows.length == 0) {", " assertEmpty(rs);", " }", "", " ResultSetMetaData rsmd = rs.getMetaData();", " Assert.assertEquals(\"Unexpected column count\",", " expectedRows[0].length, rsmd.getColumnCount());", "", " ArrayList expected = new ArrayList(expectedRows.length);", " for (int i = 0; i < expectedRows.length; i++) {", " Assert.assertEquals(\"Different column count in expectedRows\",", " expectedRows[0].length, expectedRows[i].length);", " if (asTrimmedStrings) {", " ArrayList row = new ArrayList(expectedRows[i].length);", " for (int j = 0; j < expectedRows[i].length; j++) {", " String val = (String) expectedRows[i][j];", " row.add(val == null ? null : val.trim());", " }", " expected.add(row);", " } else {", " expected.add(Arrays.asList(expectedRows[i]));", " }", " }", "", " ArrayList actual = new ArrayList(expectedRows.length);", " while (rs.next()) {", " ArrayList row = new ArrayList(expectedRows[0].length);", " for (int i = 1; i <= expectedRows[0].length; i++) {", " if (asTrimmedStrings) {", " String s = rs.getString(i);", " row.add(s == null ? null : s.trim());", " } else {", " row.add(rs.getObject(i));", " }", " }", " actual.add(row);", " }", " rs.close();", "", " Assert.assertEquals(\"Unexpected row count\",", " expectedRows.length, actual.size());", "", " Assert.assertTrue(\"Missing rows in ResultSet\",", " actual.containsAll(expected));", "", " actual.removeAll(expected);", " Assert.assertTrue(\"Extra rows in ResultSet\", actual.isEmpty());", " }", "" ], "header": "@@ -832,6 +832,87 @@ public class JDBC {", "removed": [] } ] } ]
derby-DERBY-2493-bd34a3ac
DERBY-2493: Use unsynchronized collections in BackingStoreHashtable Store the duplicates in ArrayLists instead of Vectors. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@528374 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/BackingStoreHashtable.java", "hunks": [ { "added": [ "import java.util.ArrayList;", "import java.util.List;" ], "header": "@@ -32,12 +32,13 @@ import org.apache.derby.iapi.types.DataValueDescriptor;", "removed": [ "import java.util.Vector;" ] }, { "added": [ " * The estimated number of bytes used by ArrayList(0)", " private final static int ARRAY_LIST_SIZE =", " ClassSize.estimateBaseFromCatalog(ArrayList.class);" ], "header": "@@ -127,9 +128,10 @@ public class BackingStoreHashtable", "removed": [ " * The estimated number of bytes used by Vector(0)", " private final static int vectorSize = ClassSize.estimateBaseFromCatalog(java.util.Vector.class);" ] }, { "added": [ " * duplicates, or should it create the list of" ], "header": "@@ -166,7 +168,7 @@ public class BackingStoreHashtable", "removed": [ " * duplicates, or should it create the Vector of" ] }, { "added": [ " List row_vec;", " if (duplicate_value instanceof List)", " row_vec = (List) duplicate_value;", " // allocate list to hold duplicates", " row_vec = new ArrayList(2);", " row_vec.add(duplicate_value);", " // insert new row into list", " row_vec.add(row);", " // store list of rows back into hash table," ], "header": "@@ -425,28 +427,28 @@ public class BackingStoreHashtable", "removed": [ " Vector row_vec;", " if ((duplicate_value instanceof Vector))", " row_vec = (Vector) duplicate_value;", " // allocate vector to hold duplicates", " row_vec = new Vector(2);", " row_vec.addElement(duplicate_value);", " // insert new row into vector", " row_vec.addElement(row);", " // store vector of rows back into hash table," ] }, { "added": [ " max_inmemory_size -= ARRAY_LIST_SIZE;" ], "header": "@@ -464,7 +466,7 @@ public class BackingStoreHashtable", "removed": [ " max_inmemory_size -= vectorSize;" ] }, { "added": [ " if (duplicateValue instanceof List)", " List duplicateVec = (List) duplicateValue;", " Object[] dupRow = (Object[]) duplicateVec.get(i);" ], "header": "@@ -515,12 +517,12 @@ public class BackingStoreHashtable", "removed": [ " if( duplicateValue instanceof Vector)", " Vector duplicateVec = (Vector) duplicateValue;", " Object[] dupRow = (Object[]) duplicateVec.elementAt(i);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/HashScanResultSet.java", "hunks": [ { "added": [], "header": "@@ -23,17 +23,10 @@ package org.apache.derby.impl.sql.execute;", "removed": [ "import org.apache.derby.iapi.services.monitor.Monitor;", "", "import org.apache.derby.iapi.services.stream.HeaderPrintWriter;", "import org.apache.derby.iapi.services.stream.InfoStreams;", "", "import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;", "" ] }, { "added": [], "header": "@@ -46,9 +39,7 @@ import org.apache.derby.iapi.sql.execute.ExecutionContext;", "removed": [ "import org.apache.derby.iapi.sql.ResultSet;", "import org.apache.derby.iapi.store.access.ConglomerateController;" ] }, { "added": [], "header": "@@ -56,8 +47,6 @@ import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;", "removed": [ "", "import org.apache.derby.iapi.types.Orderable;" ] }, { "added": [ "import java.util.List;", " * either <code>DataValueDescriptor[]</code>s or <code>List</code>s of", " * <code>DataValueDescriptor[]</code>. The store builds the hash table. When a", " * collision occurs, the store builds a <code>List</code> with the colliding", " * <code>DataValueDescriptor[]</code>s." 
], "header": "@@ -66,18 +55,17 @@ import org.apache.derby.iapi.services.io.FormatableArrayHolder;", "removed": [ "import java.util.Enumeration;", "import java.util.Vector;", " * either DataValueDescriptor[]s or Vectors of DataValueDescriptor[]. The store builds ", " * the hash table. When a collision occurs, the store builds a Vector with", " * the colliding DataValueDescriptor[]s.", " *" ] }, { "added": [ "\tprivate List\t\tentryVector;" ], "header": "@@ -92,7 +80,7 @@ public class HashScanResultSet extends NoPutResultSetImpl", "removed": [ "\tprivate Vector\t\tentryVector;" ] }, { "added": [ "\t\t\t\t\tif (hashEntry instanceof List)", "\t\t\t\t\t\tentryVector = (List) hashEntry;", " (DataValueDescriptor[]) entryVector.get(0);" ], "header": "@@ -508,12 +496,12 @@ public class HashScanResultSet extends NoPutResultSetImpl", "removed": [ "\t\t\t\t\tif (hashEntry instanceof Vector)", "\t\t\t\t\t\tentryVector = (Vector) hashEntry;", " (DataValueDescriptor[]) entryVector.firstElement();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/HashTableResultSet.java", "hunks": [ { "added": [], "header": "@@ -23,28 +23,20 @@ package org.apache.derby.impl.sql.execute;", "removed": [ "import org.apache.derby.iapi.services.monitor.Monitor;", "", "import org.apache.derby.iapi.services.stream.HeaderPrintWriter;", "import org.apache.derby.iapi.services.stream.InfoStreams;", "", "import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;", "", "import org.apache.derby.iapi.sql.ResultSet;" ] }, { "added": [ "import java.util.List;" ], "header": "@@ -59,8 +51,8 @@ import org.apache.derby.iapi.store.access.KeyHasher;", "removed": [ "import java.util.Vector;" ] }, { "added": [ "\tprivate List\t\tentryVector;" ], "header": "@@ -96,7 +88,7 @@ class HashTableResultSet extends NoPutResultSetImpl", "removed": [ "\tprivate Vector\t\tentryVector;" ] }, { "added": [ "\t\t\t\t\tif (hashEntry instanceof List)", "\t\t\t\t\t\tentryVector = (List) hashEntry;", " (DataValueDescriptor[]) entryVector.get(0);" ], "header": "@@ -346,12 +338,12 @@ class HashTableResultSet extends NoPutResultSetImpl", "removed": [ "\t\t\t\t\tif (hashEntry instanceof Vector)", "\t\t\t\t\t\tentryVector = (Vector) hashEntry;", " (DataValueDescriptor[]) entryVector.firstElement();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/conglomerate/GenericScanController.java", "hunks": [ { "added": [], "header": "@@ -40,8 +40,6 @@ import org.apache.derby.iapi.store.raw.LockingPolicy;", "removed": [ "import org.apache.derby.iapi.store.access.Qualifier;", "" ] } ] } ]
derby-DERBY-2498-f9f83296
DERBY-2498; fix NullPointerException in clientDataSource.getConnection and ClientConnectionPoolDataSource.getConnection when the databaseName is invalid. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@711266 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/net/NetConnection.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.Attribute;" ], "header": "@@ -33,6 +33,7 @@ import org.apache.derby.shared.common.reference.MessageId;", "removed": [] } ] } ]
derby-DERBY-2500-435c451f
DERBY-2500: Fix PredicateList.orderUsefulPredicates() to recognize when we're doing a hash join and to explicitly avoid generating probe predicates in that case. Also re-enable the "testResultSetInOrderWhenUsingIndex()" fixture for lang/DistinctTest.java and add some additional test cases. Finally, update comments where appropriate to better explain the need for this restriction on probe predicates. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@525925 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/PredicateList.java", "hunks": [ { "added": [ "\t\t/* If we have a \"useful\" IN list probe predicate we will generate a", "\t\t * start/stop key for optTable of the form \"col = <val>\", where <val>", "\t\t * is the first value in the IN-list. Then during normal index multi-", "\t\t * probing (esp. as implemented by exec/MultiProbeTableScanResultSet)", "\t\t * we will use that start/stop key as a \"placeholder\" into which we'll", "\t\t * plug the values from the IN-list one at a time.", "\t\t *", "\t\t * That said, if we're planning to do a hash join with optTable then", "\t\t * we won't generate a MultiProbeTableScanResult; instead we'll", "\t\t * generate a HashScanResultSet, which does not (yet) account for", "\t\t * IN-list multi-probing. That means the start/stop key \"col = <val>\"", "\t\t * would be treated as a regular restriction, which could lead to", "\t\t * incorrect results. So if we're dealing with a hash join, we do", "\t\t * not consider IN-list probe predicates to be \"useful\". DERBY-2500.", "\t\t *", "\t\t * Note that it should be possible to enhance HashScanResultSet to", "\t\t * correctly perform index multi-probing at some point, and there", "\t\t * would indeed be benefits to doing so (namely, we would scan fewer", "\t\t * rows from disk and build a smaller hash table). But until that", "\t\t * happens we have to make sure we do not consider probe predicates", "\t\t * to be \"useful\" for hash joins.", "\t\t *", "\t\t * Only need to do this check if \"pushPreds\" is true, i.e. if we're", "\t\t * modifying access paths and thus we know for sure that we are going", "\t\t * to generate a hash join.", "\t\t */", "\t\tboolean skipProbePreds = pushPreds &&", "\t\t\toptTable.getTrulyTheBestAccessPath().getJoinStrategy().isHashJoin();", "" ], "header": "@@ -596,6 +596,35 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [] } ] } ]
derby-DERBY-2502-031843a0
DERBY-2502: Fix error in XMLTypeAndOpsTest introduced by ResultSet-closing patch. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@525924 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/JDBC.java", "hunks": [ { "added": [ " {", " assertFullResultSet( rs, expectedRows, allAsTrimmedStrings, true );", " }", "", " /**", " * Takes a result set and a two-dimensional array and asserts", " * that the rows and columns in the result set match the number,", " * order, and values of those in the array. Each row in", " * the array is compared with the corresponding row in the", " * result set.", " *", " * Will throw an assertion failure if any of the following", " * is true:", " *", " * 1. Expected vs actual number of columns doesn't match", " * 2. Expected vs actual number of rows doesn't match", " * 3. Any column in any row of the result set does not \"equal\"", " * the corresponding column in the expected 2-d array. If", " * \"allAsTrimmedStrings\" is true then the result set value", " * will be retrieved as a String and compared, via the \".equals()\"", " * method, to the corresponding object in the array (with the", " * assumption being that the objects in the array are all ", " * Strings). Otherwise the result set value will be retrieved", " * and compared as an Object, which is useful when asserting", " * the JDBC types of the columns in addition to their values.", " *", " * NOTE: It follows from #3 that the order of the rows in the", " * in received result set must match the order of the rows in", " * the received 2-d array. Otherwise the result will be an", " * assertion failure.", " *", " * @param rs The actual result set.", " * @param expectedRows 2-Dimensional array of objects representing", " * the expected result set.", " * @param allAsTrimmedStrings Whether or not to fetch (and compare)", " * all values from the actual result set as trimmed Strings; if", " * false the values will be fetched and compared as Objects. For", " * more on how this parameter is used, see assertRowInResultSet().", " * @param closeResultSet If true, the ResultSet is closed on the way out.", " */", " public static void assertFullResultSet(ResultSet rs,", " Object [][] expectedRows, boolean allAsTrimmedStrings, boolean closeResultSet)", " throws SQLException" ], "header": "@@ -672,6 +672,49 @@ public class JDBC {", "removed": [] }, { "added": [ " if ( closeResultSet ) { rs.close(); }" ], "header": "@@ -693,7 +736,7 @@ public class JDBC {", "removed": [ " rs.close();" ] } ] } ]
derby-DERBY-2502-ccce12c3
DERBY-2502: Add a header comment to XML.java, explaining how to configure your environment so that the XML tests run. Also fix a javadoc warning in BaseJDBCTestCase.java. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@525933 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/XML.java", "hunks": [ { "added": [ " * <p>", " * </p>", " *", " * <p>", " * Note that The XML tests require a more advanced version of Xalan", " * than the default version bundled with JDK 1.4. The XML tests silently", " * exit if the required environment is not found.", " * </p>", " *", " * <p>", " * To run the XML tests under JDK 1.4, you must do the following:", " * </p>", " *", " * <ul>", " * <li>Download the latest version of Xalan (2.7.0 as of this writing).</li>", " * <li>Copy all of the downloaded jars into the jre/lib/endorsed directory", " * of your JDK 1.4 installation. Those jar files are:", " * serializer.jar, xalan.jar, xercesImpl.jar, xml-apis.jar, and xsltc.jar.</li>", " * </ul>", " *", " * <p>", " *That's it! Now the XML tests should run for you under JDK 1.4.", " * </p>", " *", " * <p>", " * To run the XML tests under a higher version of the JDK, you must do the", " * following:", " * </p>", " *", " * <ul>", " * <li>Download the latest version of Xalan as described above.</li>", " * <li>Wire the downloaded jar files into your CLASSPATH.</li>", " * </ul>" ], "header": "@@ -39,7 +39,40 @@ import java.util.Properties;", "removed": [] } ] } ]
derby-DERBY-2505-38520af2
DERBY-2505 (partial): Convert derbynet/testij.java to JUnit. Getting an NPE looking for the test script when running with jars, need to resolve that before hooking it into the derbynet _Suite. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@525326 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/tools/org/apache/derby/impl/tools/ij/ConnectionEnv.java", "hunks": [ { "added": [ "import java.security.AccessController;", "import java.security.PrivilegedAction;" ], "header": "@@ -28,6 +28,8 @@ import java.util.Hashtable;", "removed": [] } ] }, { "file": "java/tools/org/apache/derby/impl/tools/ij/Main.java", "hunks": [ { "added": [ "import java.security.AccessController;", "import java.security.PrivilegedAction;" ], "header": "@@ -37,6 +37,8 @@ import java.io.PrintStream;", "removed": [] }, { "added": [ "\t\tfinal String outFile = util.getSystemProperty(\"ij.outfile\");", "\t\t\tFileOutputStream fos = (FileOutputStream) AccessController.doPrivileged(new PrivilegedAction() {", "\t\t\t\tpublic Object run() {", "\t\t\t\t\tFileOutputStream out = null;", "\t\t\t\t\ttry {", "\t\t\t\t\t\tout = new FileOutputStream(outFile);", "\t\t\t\t\t} catch (FileNotFoundException e) {", "\t\t\t\t\t\tout = null;", "\t\t\t\t\t}", "\t\t\t\t\treturn out;", "\t\t\t\t}", "\t\t\t});", "\t\t\tout = langUtil.getNewOutput(fos);", "", "\t\t\tif (out == null)", "\t\t\t oldOut.println(langUtil.getTextMessage(\"IJ_IjErroUnabTo\",outFile));", "\t" ], "header": "@@ -132,15 +134,25 @@ public class Main {", "removed": [ "\t\tString outFile = util.getSystemProperty(\"ij.outfile\");", "\t\t\ttry {", "\t\t\t\tout = langUtil.getNewOutput(new FileOutputStream(outFile));", "\t\t\t}", "\t\t\tcatch (IOException ioe) {", "\t\t\t\toldOut.println(langUtil.getTextMessage(\"IJ_IjErroUnabTo\",outFile));", "\t\t\t}" ] } ] }, { "file": "java/tools/org/apache/derby/impl/tools/ij/util.java", "hunks": [ { "added": [ "import java.security.AccessController;", "import java.security.PrivilegedAction;" ], "header": "@@ -31,6 +31,8 @@ import java.io.InputStream;", "removed": [] } ] }, { "file": "java/tools/org/apache/derby/impl/tools/ij/utilMain.java", "hunks": [ { "added": [ "import java.security.AccessController;", "import java.security.PrivilegedAction;" ], "header": "@@ -42,6 +42,8 @@ import java.io.FileInputStream;", "removed": [] }, { "added": [ " \t\t//boolean showNoCountForSelect = Boolean.getBoolean(\"ij.showNoCountForSelect\");", " \t\tString prop = (String) AccessController.doPrivileged(new PrivilegedAction() {", " \t\t\t\t\tpublic Object run() {", " \t\t\t\t\t\treturn System.getProperty(\"ij.showNoCountForSelect\");", " \t\t\t\t\t}", " \t\t});", " \t\tboolean showNoCountForSelect = Boolean.valueOf(prop).booleanValue();", " \t\tprop = (String) AccessController.doPrivileged(new PrivilegedAction() {", "\t\t\t\tpublic Object run() {", "\t\t\t\t\treturn System.getProperty(\"ij.showNoConnectionsAtStart\");", "\t\t\t\t}", " \t});", " \t\tboolean showNoConnectionsAtStart = Boolean.valueOf(prop).booleanValue();", "", " \t\tif (!(showNoConnectionsAtStart)) {" ], "header": "@@ -248,13 +250,25 @@ public class utilMain implements java.security.PrivilegedAction {", "removed": [ " \t\tboolean showNoCountForSelect = Boolean.getBoolean(\"ij.showNoCountForSelect\");", " \t\tboolean showNoConnectionsAtStart = Boolean.getBoolean(\"ij.showNoConnectionsAtStart\");", " \t\tif (!(showNoConnectionsAtStart)) {" ] } ] } ]
derby-DERBY-2515-932c26ff
DERBY-2515: Improve exception handling when copying INOUT args. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1087346 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/CallableStatement.java", "hunks": [ { "added": [ " ", " for ( int i = 0; i < cursorParamCount; i++ )", " {", " if ( parameterMetaData_.sqlxParmmode_[ i ] == java.sql.ParameterMetaData.parameterModeInOut )", " int jdbcParamNumber = i + 1;", " Object returnArg;", " ", " try {", " returnArg = singletonParams.isNull_[ i ] ? null : singletonParams.getObject( jdbcParamNumber );", " } catch (SqlException se)", " IllegalArgumentException iae = new IllegalArgumentException( se.getMessage() );", " iae.initCause( se );", " throw iae;", " }", " ", " //", " // special case to coerce Integer to Short for SMALLINT", " //", " if ( parameterMetaData_.types_[ i ] == Types.SMALLINT )", " {", " if ( (returnArg != null) && (returnArg instanceof Integer) )", " returnArg = new Short( ((Integer) returnArg).shortValue() );", " ", " setInput( jdbcParamNumber, returnArg );", " } // end if INOUT arg", " } // end loop through args" ], "header": "@@ -1185,32 +1185,37 @@ public class CallableStatement extends PreparedStatement", "removed": [ " try {", " for ( int i = 0; i < cursorParamCount; i++ )", " if ( parameterMetaData_.sqlxParmmode_[ i ] == java.sql.ParameterMetaData.parameterModeInOut )", " int jdbcParamNumber = i + 1;", " Object returnArg = singletonParams.isNull_[ i ] ? null : singletonParams.getObject( jdbcParamNumber );", "", " //", " // special case to coerce Integer to Short for SMALLINT", " //", " if ( parameterMetaData_.types_[ i ] == Types.SMALLINT )", " if ( (returnArg != null) && (returnArg instanceof Integer) )", " {", " returnArg = new Short( ((Integer) returnArg).shortValue() );", " }", " ", " setInput( jdbcParamNumber, returnArg );", " }", " } catch (Exception se)", " {", " throw new IllegalArgumentException( se.getMessage() );", " }" ] } ] } ]
derby-DERBY-2516-b98b60ea
DERBY-2516: Network Client allows execution of callable statement when one of the parameters is not set Check that INOUT parameters are set, not only registered with registerOutParam(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1428256 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2519-b224e664
DERBY-2519: Commit Oystein's derby-2519.diff, cleaning up the BlobClob4BlobTest. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@533785 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2524-13117b07
My earlier commit 525568 caused grantRevokeDDL.sql to fail because in TypeDescriptorImpl's equals method, I was comparing the collaiton type and derivation for non-character datatypes to derive equality. Collation type and derivation should only be checked for character datatypes. This commit addresses that problem. I ran the derbyall suite and noticed no new test failures because of this patch. The patch is being tracked as part of DERBY-2524 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@527033 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/types/TypeDescriptorImpl.java", "hunks": [ { "added": [ "\t {", "\t\t\tswitch (typeId.getJDBCTypeId()) {", "\t\t\tcase Types.CHAR:", "\t\t\tcase Types.VARCHAR:", "\t\t\tcase Types.LONGVARCHAR:", "\t\t\tcase Types.CLOB:", "\t\t\t\t//if we are dealing with character types, then we should ", "\t\t\t\t//also compare the collation information on them.", "\t\t\t\tif(this.collationDerivation != typeDescriptor.getCollationDerivation() ||", "\t\t\t\t\t\tthis.collationType != typeDescriptor.getCollationType())", "\t\t\t\t\treturn false;", "\t\t\t\telse", "\t\t\t\t\treturn true;", "\t\t\tdefault:", "\t\t\t\t//no collation checking required if we are dealing with ", "\t\t\t\t//non-char datatypes.", "\t\t\t\treturn true;", "\t\t\t}", "\t }" ], "header": "@@ -404,13 +404,29 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [ "\t\t this.collationDerivation == typeDescriptor.getCollationDerivation() ||", "\t\t this.collationType == typeDescriptor.getCollationType() || ", "\t\t\treturn true;" ] } ] } ]
derby-DERBY-2524-7752cf6d
This patch is for DERBY-2524 (DataTypeDescriptor(DTD) needs to have collation type and collation derivation. These new fields will apply only for character string types. Other types should ignore them.) and it does following 2 things 1)Add collation type and collation derivation attributes and apis to TypeDescriptor interface and it's implementations. 2)Save the collation type in the scale field of character types in writeExternal method of TypeDescriptorImpl. And read the scale field into the collation type for character types in readExternal method of TypeDescriptorImpl. svn stat -q M java\engine\org\apache\derby\iapi\types\DataTypeDescriptor.java M java\engine\org\apache\derby\catalog\TypeDescriptor.java M java\engine\org\apache\derby\catalog\types\TypeDescriptorImpl.java Details of the patch 1)Added getters and setters for collationType and collationDerivation in TypeDescriptor. In addition, TypeDescriptor has new constants defined in them which will be used by the rest of the collation related code in Derby. One of the constants is COLLATION_DERIVATION_INCORRECT I am initializing the collation derivation for all the data types to COLLATION_DERIVATION_INCORRECT in TypeDescriptorImpl. This should get changed to "implicit" or "none" for character string types before the runtime code kicks in. For all the other types, it will remain set to COLLATION_DERIVATION_INCORRECT because collation does not apply to those data types. 2)DTD implements the new apis in the TypeDescriptor interface. 3)2 set of changes went into a)TypeDescriptorImpl has 2 new fields, namely, collationType and collationDerivation. collationDerivation is initialized to TypeDescriptor.COLLATION_DERIVATION_INCORRECT. For character string types, these field should get set correctly. In addition, there are apis to set and get values out of these 2 fields. b)The next change for this class is in writeExternal and readExternal methods. I would like community's feedback on my assumption for this particular change. The collation type of a character string type will get saved in the existing scale field since scale does not apply to character string types. My question is about collation derivation. The collation derivation infromation does not get saved like collation type. But may be that is ok because I am assuming that writeExternal and readExternal get called only for the persistent columns (ie columns belonging to system and user tables). Collation derivation of such character string columns (coming from persistent tables) is always implicit. And, hence in readExternal, for character string types, I can initialize collation derivation to be implicit. My assumption is that readExternal and writeExternal methods will never get called for character string types with collation of none or explicit. Today we don't have explicit as one of the possible values for collation derivation, but a character string type will have the collation derivation of none if it was the result of an aggregate method involving operands with different collation derivations. This comes from item 11) from Section Collation Determination section at http://wiki.apache.org/db-derby/BuiltInLanguageBasedOrderingDERBY-1478 Questions 1)I have included all the constant definitions related to collation in TypeDescriptor. If anyone has suggestion on a better place to define them, let me know. Wonder if there is already a class to define miscellaneous constant definitions like the ones I have added. TypeDescriptor does look like a good place for these constants defined by me because these constants all belong to the data type world. 2)Is it right to assume that readExternal and writeExternal methods in TypeDescriptorImpl will get called only for persistent columns? git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@525568 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/TypeDescriptor.java", "hunks": [ { "added": [ "\t/**", "\t For a character string type, the collation derivation should always be ", "\t \"explicit\"(not possible in Derby 10.3), \"implicit\" or \"none\". We will ", "\t start by setting it to \"error\" here. But at runtime, no TypeDescriptor ", "\t which belongs to a character string type should ever have a collation ", "\t derivation of \"error\". The initialization to \"error\" is for catching an ", "\t edge(bug) case where the collation derivation may not have gotten set ", "\t correctly for a character string type.", "\t For all the other types (which are not character string types, the ", "\t collation derivation will be set to \"error\".", "\t */", "\tpublic\tstatic\tString COLLATION_DERIVATION_INCORRECT = \"error\";", "\tpublic\tstatic\tString COLLATION_DERIVATION_IMPLICIT = \"implicit\";", "\tpublic\tstatic\tString COLLATION_DERIVATION_NONE = \"none\";", "\t/**", "\t * In Derby 10.3, all the character columns could have a collation type of", "\t * UCS_BASIC. This is same as what we do in Derby 10.3 release. The other", "\t * option in Derby 10.3 is that all the character string types belonging to", "\t * system tables will collate using UCS_BASIC but all the character string", "\t * types belonging to user tables will collate using TERRITORY_BASED", "\t * collation.", "\t */", "\tpublic\tstatic\tint COLLATION_VALUE_UCS_BASIC = 0;", "\tpublic\tstatic\tint COLLATION_VALUE_TERRITORY_BASED = 1;", "" ], "header": "@@ -46,6 +46,31 @@ public interface TypeDescriptor", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/catalog/types/TypeDescriptorImpl.java", "hunks": [ { "added": [ "\t/** @see TypeDescriptor.getCollationType */", "\tprivate int\t\t\t\t\t\tcollationType;", "\t/** @see TypeDescriptor.getCollationDerivation() */", "\tprivate String\t\t\t\t\tcollationDerivation = TypeDescriptor.COLLATION_DERIVATION_INCORRECT;" ], "header": "@@ -52,6 +52,10 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [] }, { "added": [ "\t/** @see TypeDescriptor.getCollationType */", "\tpublic int\tgetCollationType()", "\t{", "\t\treturn collationType;", "\t}", "", "\t/** @see TypeDescriptor.setCollationType */", "\tpublic void\tsetCollationType(int collationTypeValue)", "\t{", "\t\tcollationType = collationTypeValue;", "\t}", "", "\t/** @see TypeDescriptor.getCollationDerivation */", "\tpublic String\tgetCollationDerivation()", "\t{", "\t\treturn collationDerivation;", "\t}", "", "\t/** @see TypeDescriptor.setCollationDerivation */", "\tpublic void\tsetCollationDerivation(String collationDerivationValue)", "\t{", "\t\tcollationDerivation = collationDerivationValue;", "\t}", "" ], "header": "@@ -327,6 +331,30 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [] }, { "added": [ "\t\t this.collationDerivation.equals(typeDescriptor.getCollationDerivation()) ||", "\t\t this.collationType == typeDescriptor.getCollationType() || " ], "header": "@@ -374,6 +402,8 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [] }, { "added": [ "\t\t", "\t\tswitch (typeId.getJDBCTypeId()) {", "\t\tcase Types.CHAR:", "\t\tcase Types.VARCHAR:", "\t\tcase Types.LONGVARCHAR:", "\t\tcase Types.CLOB:", "\t\t\tscale = 0;", "\t\t\tcollationType = in.readInt();", "\t\t\t//I am assuming that the readExternal gets called only on ", "\t\t\t//persistent columns. 
Since all persistent character string type", "\t\t\t//columns always have the collation derivation of implicit, I will ", "\t\t\t//simply use that value for collation derivation here for character ", "\t\t\t//string type columns.", "\t\t\tcollationDerivation = TypeDescriptor.COLLATION_DERIVATION_IMPLICIT;", "\t\t\tbreak;", "\t\tdefault:", "\t\t\tscale = in.readInt();", "\t\t\tcollationType = 0;", "\t\t\tcollationDerivation = TypeDescriptor.COLLATION_DERIVATION_INCORRECT;", "\t\t\tbreak;", "\t\t}", "\t\t" ], "header": "@@ -396,7 +426,28 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [ "\t\tscale = in.readInt();" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/DataTypeDescriptor.java", "hunks": [ { "added": [ "\t/** @see TypeDescriptor.getCollationType */", "\tpublic int\tgetCollationType()", "\t{", "\t\treturn typeDescriptor.getCollationType();", "\t}", "", "\t/** @see TypeDescriptor.setCollationType */", "\tpublic void\tsetCollationType(int collationTypeValue)", "\t{", "\t\ttypeDescriptor.setCollationType(collationTypeValue);", "\t}", "", "\t/** @see TypeDescriptor.getCollationDerivation */", "\tpublic String\tgetCollationDerivation()", "\t{", "\t\treturn typeDescriptor.getCollationDerivation();", "\t}", "", "\t/** @see TypeDescriptor.setCollationDerivation */", "\tpublic void\tsetCollationDerivation(String collationDerivationValue)", "\t{", "\t\ttypeDescriptor.setCollationDerivation(collationDerivationValue);", "\t}", "" ], "header": "@@ -777,6 +777,30 @@ public final class DataTypeDescriptor implements TypeDescriptor, Formatable", "removed": [] } ] } ]
derby-DERBY-2524-96a630c7
Submitting a patch (DERBY2524_Collation_Info_In_DTD_v2_diff.txt) attached to DERBY-2524. This is a followup to the earlier commited patch (DERBY2524_Collation_Info_In_DTD_v1_diff.txt svn revision 525568) svn stat -q M java\engine\org\apache\derby\iapi\types\DataTypeDescriptor.java M java\engine\org\apache\derby\iapi\types\StringDataValue.java M java\engine\org\apache\derby\catalog\TypeDescriptor.java M java\engine\org\apache\derby\catalog\types\TypeDescriptorImpl.java The patch does following to address feedback received on the earlier patch in thread http://www.nabble.com/-jira--Created%3A-%28DERBY-2524%29-DataTypeDescriptor%28DTD%29-needs-to-have-collation-type-and-collation-derivation.-These-new-fields-will-apply-only-for-character-string-types.-Other-types-should-ignore-them.-p9842379.html 1)Moved the constant definitions from TypeDescriptor to StringDataValue. 2)Added javadoc comments for all the constants. One big javadoc comment for one of the constants in the related constants and other constants in that group will just have a javadoc of @see. 3)I had used string costants for collation derivation since they are more verbose. But that is more expensive than simply using int. As a middle ground, I have defined collation derivation constants as int but the names of the constants are verbose :) I also changed the api for collation derivation to work with int rather than String. Finally, changed collationDerivation from String to int in TypeDescriptorImpl. 4)Rather than using "error" to indicate incorrect collation derivation, we will just initialize collation derivation to "none". For all character string types, the collation derivation should get changed to "implicit" unless we are working with aggregate result type of character string type and the operands to the aggregate have different collation types. 5)Currently, I only save collation type of a persistent character string type column into SYSCOLUMNS's COLUMNDATATYPE column. Collation derivation for such character string type is assumed as "implicit" because that is the only possible option in Derby 10.3 for persistent columns. But in some future release of Derby, when we will start supporting SQL COLLATE clause, we will want to differentiate between "explicit" and "implicit" collation derivation for such persistent columns. So, may be it will be good for us to start saving collation derivation too. For now, I have added this task as a line item under wiki page http://wiki.apache.org/db-derby/BuiltInLanguageBasedOrderingDERBY-1478 under "Performance/Desirable items" section. 6)I caused several javadoc errors for using @see Classname.methodname rather than @see Classname#methodname. Sorry about that. Fixed those errors in this patch. I think with this patch, I have taken care of all the feedback received on the earlier patch DERBY2524_Collation_Info_In_DTD_v1_diff.txt. Again, if anyone has any comment on this committed patch or earlier commit svn revision 525568, please send your feedback on Derby mailing list. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@525729 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/TypeDescriptor.java", "hunks": [ { "added": [], "header": "@@ -46,31 +46,6 @@ public interface TypeDescriptor", "removed": [ "\t/**", "\t For a character string type, the collation derivation should always be ", "\t \"explicit\"(not possible in Derby 10.3), \"implicit\" or \"none\". We will ", "\t start by setting it to \"error\" here. But at runtime, no TypeDescriptor ", "\t which belongs to a character string type should ever have a collation ", "\t derivation of \"error\". The initialization to \"error\" is for catching an ", "\t edge(bug) case where the collation derivation may not have gotten set ", "\t correctly for a character string type.", "\t For all the other types (which are not character string types, the ", "\t collation derivation will be set to \"error\".", "\t */", "\tpublic\tstatic\tString COLLATION_DERIVATION_INCORRECT = \"error\";", "\tpublic\tstatic\tString COLLATION_DERIVATION_IMPLICIT = \"implicit\";", "\tpublic\tstatic\tString COLLATION_DERIVATION_NONE = \"none\";", "\t/**", "\t * In Derby 10.3, all the character columns could have a collation type of", "\t * UCS_BASIC. This is same as what we do in Derby 10.3 release. The other", "\t * option in Derby 10.3 is that all the character string types belonging to", "\t * system tables will collate using UCS_BASIC but all the character string", "\t * types belonging to user tables will collate using TERRITORY_BASED", "\t * collation.", "\t */", "\tpublic\tstatic\tint COLLATION_VALUE_UCS_BASIC = 0;", "\tpublic\tstatic\tint COLLATION_VALUE_TERRITORY_BASED = 1;", "" ] }, { "added": [ "\t * Set the collation type of this TypeDescriptor", "\t * @param collationTypeValue This will be 0(UCS_BASIC)/1(TERRITORY_BASED)" ], "header": "@@ -180,8 +155,8 @@ public interface TypeDescriptor", "removed": [ "\t * Set the collation type of this DTD", "\t * @param collationDerivationValue This will be UCS_BASIC/TERRITORY_BASED" ] } ] }, { "file": "java/engine/org/apache/derby/catalog/types/TypeDescriptorImpl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.StringDataValue;", "" ], "header": "@@ -26,6 +26,8 @@ import org.apache.derby.iapi.services.io.Formatable;", "removed": [] }, { "added": [ "\t/** @see TypeDescriptor#getCollationType() */", "\t/** @see TypeDescriptor#getCollationDerivation() */", "\tprivate int\t\t\t\t\tcollationDerivation = StringDataValue.COLLATION_DERIVATION_IMPLICIT;" ], "header": "@@ -52,10 +54,10 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [ "\t/** @see TypeDescriptor.getCollationType */", "\t/** @see TypeDescriptor.getCollationDerivation() */", "\tprivate String\t\t\t\t\tcollationDerivation = TypeDescriptor.COLLATION_DERIVATION_INCORRECT;" ] }, { "added": [ "\t/** @see TypeDescriptor#getCollationType() */", "\t/** @see TypeDescriptor#setCollationType(int) */", "\t/** @see TypeDescriptor#getCollationDerivation() */", "\tpublic int\tgetCollationDerivation()", "\t/** @see TypeDescriptor#setCollationDerivation(int) */", "\tpublic void\tsetCollationDerivation(int collationDerivationValue)" ], "header": "@@ -331,26 +333,26 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [ "\t/** @see TypeDescriptor.getCollationType */", "\t/** @see TypeDescriptor.setCollationType */", "\t/** @see TypeDescriptor.getCollationDerivation */", "\tpublic String\tgetCollationDerivation()", "\t/** @see TypeDescriptor.setCollationDerivation */", "\tpublic void\tsetCollationDerivation(String 
collationDerivationValue)" ] }, { "added": [ "\t\t this.collationDerivation == typeDescriptor.getCollationDerivation() ||" ], "header": "@@ -402,7 +404,7 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [ "\t\t this.collationDerivation.equals(typeDescriptor.getCollationDerivation()) ||" ] }, { "added": [ "\t\t//Scale does not apply to character data types. Starting 10.3 release,", "\t\t//the scale field in TypeDescriptor in SYSCOLUMNS will be used to save", "\t\t//the collation type of the character data types. Because of this, in", "\t\t//this method, we check if we are dealing with character types. If yes,", "\t\t//then read the on-disk scale field of TypeDescriptor into collation", "\t\t//type. In other words, the on-disk scale field has 2 different ", "\t\t//meanings depending on what kind of data type we are dealing with.", "\t\t//For character data types, it really represents the collation type of", "\t\t//the character data type. For all the other data types, it represents", "\t\t//the scale of that data type." ], "header": "@@ -427,6 +429,16 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [] }, { "added": [ "\t\t\tcollationDerivation = StringDataValue.COLLATION_DERIVATION_IMPLICIT;", "\t\t\tcollationDerivation = StringDataValue.COLLATION_DERIVATION_IMPLICIT;" ], "header": "@@ -439,12 +451,12 @@ public class TypeDescriptorImpl implements TypeDescriptor, Formatable", "removed": [ "\t\t\tcollationDerivation = TypeDescriptor.COLLATION_DERIVATION_IMPLICIT;", "\t\t\tcollationDerivation = TypeDescriptor.COLLATION_DERIVATION_INCORRECT;" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/StringDataValue.java", "hunks": [ { "added": [ "\t/**", "\t For a character string type, the collation derivation should always be ", "\t \"explicit\"(not possible in Derby 10.3), \"implicit\" or \"none\". We will ", "\t start by setting it to \"none\" in TypeDescriptorImpl. At runtime, only ", "\t character string types which are results of aggregate methods dealing ", "\t with operands with different collation types should have a collation ", "\t derivation of \"none\". All the other character string types should have ", "\t their collation derivation set to \"implicit\". ", "\t */", "\tpublic\tstatic\tint COLLATION_DERIVATION_NONE = 0;", "\t/** @see StringDataValue#COLLATION_DERIVATION_NONE */", "\tpublic\tstatic\tint COLLATION_DERIVATION_IMPLICIT = 1;", "\t/** @see StringDataValue#COLLATION_DERIVATION_NONE */", "\tpublic\tstatic\tint COLLATION_DERIVATION_EXPLICIT = 2;", "\t/**", "\t * In Derby 10.3, it is possible to have database with one of the following", "\t * two configurations", "\t * 1)all the character columns will have a collation type of UCS_BASIC. ", "\t * This is same as what we do in Derby 10.2 release. ", "\t * 2)all the character string columns belonging to system tables will have ", "\t * collation type of UCS_BASIC but all the character string columns ", "\t * belonging to user tables will have collation type of TERRITORY_BASED.", "\t */", "\tpublic\tstatic\tint COLLATION_TYPE_UCS_BASIC = 0;", "\t/** @see StringDataValue#COLLATION_TYPE_UCS_BASIC */", "\tpublic\tstatic\tint COLLATION_TYPE_TERRITORY_BASED = 1;", "" ], "header": "@@ -30,6 +30,33 @@ public interface StringDataValue extends ConcatableDataValue", "removed": [] } ] } ]
derby-DERBY-2526-125f9182
DERBY-4679 Several left outer joins causes unstable query with incorrect results Patch derby-4679b, which solves the following problem: When transitive closure generates new criteria into the query, it is sometimes confused by situations where the same column name appears in a result column list multiple times due to flattening of sub-queries. Flattening requires remapping of (table, column) numbers in column references. In cases where the same column name appears in a result column list multiple times, this lead to remapping (reassigning) wrong (table, column) numbers to column references in join predicates transformed to where clauses as a result of the flattening. See also DERBY-2526 and DERBY-3023 whose fixes which were partial solutions to the problem of wrong column number remappings confusing the transitive closure of search predicates performed by the preprocessing step of the optimizer. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@952237 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ColumnReference.java", "hunks": [ { "added": [ " /* For remembering original (tn,cn) of this CR during join flattening. */", " private int tableNumberBeforeFlattening = -1;", " private int columnNumberBeforeFlattening = -1;", "" ], "header": "@@ -71,6 +71,10 @@ public class ColumnReference extends ValueNode", "removed": [] }, { "added": [], "header": "@@ -850,12 +854,6 @@ public class ColumnReference extends ValueNode", "removed": [ "\t\t\t\ttableNumber = ft.getTableNumber();", "\t\t\t\tif (SanityManager.DEBUG)", "\t\t\t\t{", "\t\t\t\t\tSanityManager.ASSERT(tableNumber != -1,", "\t\t\t\t\t\t\"tableNumber not expected to be -1\");", "\t\t\t\t}" ] } ] } ]
derby-DERBY-2530-6e48cbee
Committing the patch DERBY2530_create_alter_table_collation_type_v1_diff.txt attached to DERBY-2530. This patch changes alter table and create table code so that the persistent user defined character types will have collation type of their schema. svn stat -q M java\engine\org\apache\derby\impl\sql\compile\AlterTableNode.java M java\engine\org\apache\derby\impl\sql\compile\CreateTableNode.java M java\engine\org\apache\derby\iapi\sql\dictionary\SchemaDescriptor.java This patch includes following changes 1)If a character type is getting added by alter table statement then AlterTable node needs to set the collation type of that character column to same value as schema descriptor's collation type. Collation derivation of this column will be set to implicit. 2)For all the character columns that get added by create table statement, CreateTable node should set their collation type to same value as schema descriptor's collation type. Collation derivation of such columns will be set to implicit. 3)Provide a method called getCollationType on SchemaDescriptor so that AlterTable node and CreateTable node can call that method. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@526385 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/AlterTableNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.StringDataValue;" ], "header": "@@ -35,6 +35,7 @@ import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateTableNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.StringDataValue;" ], "header": "@@ -44,6 +44,7 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [] }, { "added": [ "\t\t", "\t\t//Set the collation type and collation derivation of all the character", "\t\t//type columns. Their collation type will be same as the collation of", "\t\t//the schema they belong to. Theie collation derivation will be ", "\t\t//\"implicit\".", " for (int i = 0; i < colInfos.length; i++) {", " \tDataTypeDescriptor dts = colInfos[i].dataType;", " \tif (dts.getTypeId().isStringTypeId()) {", " \t\tdts.setCollationType(sd.getCollationType());", "sqlc \t\tdts.setCollationDerivation(StringDataValue.COLLATION_DERIVATION_IMPLICIT);", " \t}", " }" ], "header": "@@ -424,6 +425,18 @@ public class CreateTableNode extends DDLStatementNode", "removed": [] } ] } ]
derby-DERBY-2534-0cab10f1
I am committing an intermediate patch for language based ordering. This patch is also attached to DERBY-2534 as DERBY2534_getValue_On_StringDataValue_v1_diff.txt. The patch adds a new api to StringDataValue interface and the new api looks as follows public StringDataValue getValue(RuleBasedCollator collatorForComparison); The new api will be needed in quite a few different places. 2 distinct uses that I can see at this point are 1)Store will have a format id and collation type when it is trying to construct a DVD template. Using the formatid, we will first always get the base class DVD for char datatypes namely SQLChar, SQLVarchar, SQLLongvarchar or SQLClob. But, if the collation type is not 0 ie it is not UCS_BASIC, then we want to use Collation sensitive DVDs of base char DVDs because we want to use the passed Collator for collation rather than the default UCS_BASIC Collator. The collation sensitive DVDs of char datatypes are CollatorSQLChar, CollatorSQLVarchar, CollatorSQLLongvarchar and CollatorSQLClob. In order to derive these collation sensitive DVDs of character datatypes, we will use this new api called getValue on base character DVDs. The getValue method will have the Collator object as parameter to it. If the Collator object is null, then we can continue to use the base DVD. But if the Collator object is not null, then we want to construct collation sensitive DVD. The new api on StringDataValue will help achieve this behavior. 2)Another place which I can envision using this new api is in DataTypeDescriptor.getNull() method which returns a DVD. Currently, the implementation of this method looks as follows public DataValueDescriptor getNull() { return typeId.getNull(); } So, if the typeid of DTD is character data type, this method will always return base char DVD, no matter what is the collation type of the DTD. But, if the DTD has a territory based collation set for it, then this method should return collation sensitive char DVD. This functionality can be achieved by using the new api on StringDataValue. I do not anticipate this new method ever getting called on collation sensitive DVDs in Derby 10.3 In future, when Derby will start supporting SQL standard COLLATE clause, this method might get called on the collation sensitive DVDs but for Derby 10.3, the new api in collation sensitive DVDs is just a place holder. Another change to note is I have changed all the collation sensitive subclasses to have their method setCollator changed from private to protected. This is so that the getValue method from their correspoding base classes can call the setCollator method on subclasses. The files changed by this commit are svn stat -q M java\engine\org\apache\derby\iapi\types\SQLLongvarchar.java M java\engine\org\apache\derby\iapi\types\StringDataValue.java M java\engine\org\apache\derby\iapi\types\CollatorSQLChar.java M java\engine\org\apache\derby\iapi\types\CollatorSQLClob.java M java\engine\org\apache\derby\iapi\types\CollatorSQLVarchar.java M java\engine\org\apache\derby\iapi\types\SQLChar.java M java\engine\org\apache\derby\iapi\types\SQLClob.java M java\engine\org\apache\derby\iapi\types\SQLVarchar.java M java\engine\org\apache\derby\iapi\types\CollatorSQLLongvarchar.java The code compiles ok with my changes. None of the tests should get impacted because currently, this new api on StringDataValue is not called by any other code in Derby. If anyone has any feedback, please let me know. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@526668 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/CollatorSQLChar.java", "hunks": [ { "added": [ "\tprotected void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ], "header": "@@ -65,7 +65,7 @@ public class CollatorSQLChar extends SQLChar implements CollationElementsInterfa", "removed": [ "\tprivate void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/CollatorSQLClob.java", "hunks": [ { "added": [ "\tprotected void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ], "header": "@@ -65,7 +65,7 @@ public class CollatorSQLClob extends SQLClob implements CollationElementsInterfa", "removed": [ "\tprivate void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/CollatorSQLLongvarchar.java", "hunks": [ { "added": [ "\tprotected void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ], "header": "@@ -65,7 +65,7 @@ public class CollatorSQLLongvarchar extends SQLLongvarchar implements CollationE", "removed": [ "\tprivate void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/CollatorSQLVarchar.java", "hunks": [ { "added": [ "\tprotected void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ], "header": "@@ -65,7 +65,7 @@ public class CollatorSQLVarchar extends SQLVarchar implements CollationElementsI", "removed": [ "\tprivate void setCollator(RuleBasedCollator collatorForCharacterDatatypes)" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLClob.java", "hunks": [ { "added": [ "import java.text.RuleBasedCollator;" ], "header": "@@ -36,6 +36,7 @@ import java.sql.Date;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLLongvarchar.java", "hunks": [ { "added": [ "import java.text.RuleBasedCollator;", "" ], "header": "@@ -21,6 +21,8 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLVarchar.java", "hunks": [ { "added": [ "import java.text.RuleBasedCollator;", "" ], "header": "@@ -21,6 +21,8 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/StringDataValue.java", "hunks": [ { "added": [ "import java.text.RuleBasedCollator;", "" ], "header": "@@ -23,6 +23,8 @@ package org.apache.derby.iapi.types;", "removed": [] }, { "added": [ "\t/**", "\t * Gets either SQLChar/SQLVarchar/SQLLongvarchar/SQLClob(base classes) or ", "\t * CollatorSQLChar/CollatorSQLVarchar/CollatorSQLLongvarch/CollatorSQLClob", "\t * (subclasses). Whether this method returns the base class or the subclass ", "\t * depends on the value of the RuleBasedCollator. If RuleBasedCollator is ", "\t * null, then the object returned would be baseclass otherwise it would be ", "\t * subcalss.", "\t */", "\tpublic StringDataValue getValue(RuleBasedCollator collatorForComparison);" ], "header": "@@ -175,4 +177,13 @@ public interface StringDataValue extends ConcatableDataValue", "removed": [] } ] } ]
derby-DERBY-2543-4d83c528
DERBY-2543: Wrapping the original SystemPropertyTestSetup in a DatabasePropertyTestSetup that shuts down and restarts the engine allows the new setting for maxMemoryPerTable to be properly set for testVirtualMemoryHeap. Reenabling the test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@528677 13f79535-47bb-0310-9956-ffa450edef68
[]
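As a rough illustration of why the wrapping above helps, here is a hypothetical JUnit decorator (not the actual SystemPropertyTestSetup or DatabasePropertyTestSetup classes): a setting such as maxMemoryPerTable is only picked up when the engine boots, so after installing the property the decorator shuts the engine down so the next connection restarts it with the new value.

    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.util.Properties;
    import junit.extensions.TestSetup;
    import junit.framework.Test;

    // Hypothetical decorator: install system properties, then shut the engine
    // down so the next connection boots it and picks the new values up.
    class EngineRestartSetup extends TestSetup {
        private final Properties newValues;

        EngineRestartSetup(Test test, Properties newValues) {
            super(test);
            this.newValues = newValues;
        }

        protected void setUp() throws Exception {
            System.getProperties().putAll(newValues);
            try {
                DriverManager.getConnection("jdbc:derby:;shutdown=true");
            } catch (SQLException expected) {
                // a successful engine shutdown is reported as an SQLException
            }
        }
    }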
derby-DERBY-2543-9584a612
DERBY-2543: remove testVirtualMemoryHeap from the nightly runs until it can be fixed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@528670 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2546-2d3034fb
DERBY-2546 - avoiding running a section of the test that hits a CharConversionException with JSR169 (weme6.1) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@545846 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2549-f576b26f
DERBY-2549, contributed by Mayuresh Nirhali: Fix a null pointer when running inplace compress. Change the code to correctly handle the case when more than 100 rows are moved from a single page. The new code returns to the caller after processing the 100 rows, and the next trip through the loop picks up the scan where it left off on that same page. A test case was added to the existing test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@540657 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/heap/HeapCompressScan.java", "hunks": [ { "added": [ " // only fetch maximum number of rows per \"group\" as the size of", " // the array. If more than one group is available on page, just", " // leave the scan on the page and the next group will come from", " // this page also.", " int max_rowcnt = row_array.length;", "" ], "header": "@@ -102,6 +102,12 @@ class HeapCompressScan", "removed": [] }, { "added": [ "" ], "header": "@@ -175,6 +181,7 @@ class HeapCompressScan", "removed": [] }, { "added": [ " int restart_slot = scan_position.current_slot;" ], "header": "@@ -191,6 +198,7 @@ class HeapCompressScan", "removed": [] }, { "added": [ " // position the scan at previous slot, so next trip" ], "header": "@@ -226,7 +234,7 @@ class HeapCompressScan", "removed": [ " // postion the scan at previous slot, so next trip" ] } ] } ]
derby-DERBY-255-43592363
This fixes DERBY-255: Closing a resultset after retrieving BLOB or CLOB data > 32K does not release locks properly. The network server/client materializes the LOB on the client and cannot differentiate getBlob from getBytes or getBinaryStream. Previously, the network server would always call getBlob/getClob for any LOB-related call. This change makes the network server use getBytes/getString and not hold locks for any of the calls. The implementation adds a new class, EXTDTAInputStream, to the network server to localize the stream handling for large objects. This should make it easier to adjust in the future as improvements are made in the large object handling. Because we need a length in order to write a stream, EXTDTAInputStream currently calls getBytes or getString to get the length and streams out that object. This is apparently required because we cannot reset the input stream after traversing it to get the length. Future suggestions for changing the network server to handle this in a more complete way would be to: 1) Change DDMWriter.writeScalarStream to not require a length and optimize EXTDTAObjectInputStream accordingly 2) Add support for lob locators with network server. The getBlob, getClob calls would use the locators and would hold locks until the end of the transaction. git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@179014 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/DDMWriter.java", "hunks": [ { "added": [ "\t// TODO: Rewrite writeScalarStream to avoid passing a length.", "\t// The length is never written and not required by the DRDA spec.", "\t// Also looks like on IOException we just pad out the stream instead", "\t// of actually sending an exception. Similar code is in client, so ", "\t// should be fixed in both places." ], "header": "@@ -637,6 +637,11 @@ class DDMWriter", "removed": [] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ "\t\t\t\t\t\t\tEXTDTAInputStream extdtaStream= ", "\t\t\t\t\t\t\t\tEXTDTAInputStream.getEXTDTAStream(rs, i, drdaType);", "\t\t\t\t\t\t\twriteFdocaVal(i,extdtaStream, drdaType," ], "header": "@@ -5910,11 +5910,10 @@ public class DRDAConnThread extends Thread {", "removed": [ "\t\t\t\t\t\t\twriteFdocaVal(i,rs.getBlob(i),drdaType,", "\t\t\t\t\t\t\t\t\t\t precision,scale,rs.wasNull(),stmt);", "\t\t\t\t\t\t\tbreak;", "\t\t\t\t\t\t\twriteFdocaVal(i,rs.getClob(i),drdaType," ] }, { "added": [ "\t\t\t\tcase FdocaConstants.DRDA_TYPE_NLOBCMIXED:", "\t\t\t\t\tvalLength = ((EXTDTAInputStream) val).length();" ], "header": "@@ -6550,19 +6549,13 @@ public class DRDAConnThread extends Thread {", "removed": [ "\t\t\t\t\tvalLength = ((Blob) val).length();", "\t\t\t\tcase FdocaConstants.DRDA_TYPE_NLOBCMIXED:", "\t\t\t\t\tvalLength = ((Clob) val).length();", "\t\t\t\t\t// do not send EXTDTA for lob of length 0, beetle 5967", "\t\t\t\t\tif (valLength > 0) ", "\t\t\t\t\t\tstmt.addExtDtaObject(val,index);", "\t\t\t\t\twriter.writeExtendedLength(valLength);", "\t\t\t\t\tbreak;" ] }, { "added": [ " if (o instanceof EXTDTAInputStream) {", "\t\t\tEXTDTAInputStream stream = (EXTDTAInputStream) o;", "\t\t\tlong lobLength = stream.length();", "\t\t\t\t\t\t\t\t\t (int) Math.min(lobLength,", "\t\t\t\t\t\t\t\t\t stream,", "\t\t\ttry {", "\t\t\t\t// close the stream when done", "\t\t\t\tif (stream != null)", "\t\t\t\t\tstream.close();", "\t\t\t} catch (IOException e) {", "\t\t\t\tUtil.javaException(e);", "\t\t\t}", " }" ], "header": "@@ -7125,29 +7118,24 @@ public class DRDAConnThread extends Thread {", "removed": [ " if (o instanceof Blob) {", "\t\t\tBlob b = (Blob) o;", "\t\t\tlong blobLength = b.length();", "\t\t\t\t\t\t\t\t\t (int) Math.min(blobLength,", "\t\t\t\t\t\t\t\t\t b.getBinaryStream (),", "\t\t}", "\t\telse if (o instanceof Clob) {", "\t\t\tClob c = (Clob) o;", "\t\t\tlong[] outlen = {-1};", "\t\t\tByteArrayInputStream unicodeStream =", "\t\t\t\tconvertClobToUnicodeStream(c, outlen);", "\t\t\twriter.writeScalarStream (chainedWithSameCorrelator,", "\t\t\t\t\t\t\t\t\t CodePoint.EXTDTA,", "\t\t\t\t\t\t\t\t\t (int) Math.min(outlen[0],", "\t\t\t\t\t\t\t\t\t\t\t\t\t Integer.MAX_VALUE),\t\t ", "\t\t\t\t\t\t\t\t\t unicodeStream,", "\t\t\t\t\t\t\t\t\t writeNullByte);", "\t\t}" ] } ] } ]
derby-DERBY-255-a3acaa59
Add test for verification of DERBY-255 fix that ResultSet.getString(), ResultSet.getCharacterStream(), ResultSet.getBytes(), and ResultSet.getBinaryStream() do not hold locks after the ResultSet is closed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@554073 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/Utilities.java", "hunks": [ { "added": [ " \t/**", " \t * repeatChar is used to create strings of varying lengths.", " \t * called from various tests to test edge cases and such.", " \t *", " \t * @param c character to repeat", " \t * @param repeatCount Number of times to repeat character", " \t * @return String of repeatCount characters c", " \t */", " public static String repeatChar(String c, int repeatCount)", " {", " \t char ch = c.charAt(0);", "", " \t char[] chArray = new char[repeatCount];", " \t for (int i = 0; i < repeatCount; i++)", " \t {", " \t\t chArray[i] = ch;", " \t }", "", " \t return new String(chArray);", "", " }" ], "header": "@@ -81,6 +81,27 @@ public class Utilities {", "removed": [] } ] } ]
derby-DERBY-2551-2dabeef0
DERBY-2551: Global Xid value garbled in syscs_diag.transaction_table Fix contributed by Julius Stroffek. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@531468 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/store/access/GlobalXact.java", "hunks": [ { "added": [ " if (mask < 16) {", " globalhex += \"0\" + Integer.toHexString(mask);", " } else {", " globalhex += Integer.toHexString(mask);", " }" ], "header": "@@ -83,7 +83,11 @@ public abstract class GlobalXact {", "removed": [ "\t\t\t\tglobalhex += Integer.toHexString(mask);" ] }, { "added": [ " if (mask < 16) {", " branchhex += \"0\" + Integer.toHexString(mask);", " } else {", " branchhex += Integer.toHexString(mask);", " }" ], "header": "@@ -93,7 +97,11 @@ public abstract class GlobalXact {", "removed": [ "\t\t\t\tbranchhex += Integer.toHexString(mask);" ] } ] } ]
derby-DERBY-2556-0c41dd14
DERBY-2556 Code paths for db restore do not use doPrivileged-calls, causing SecurityException. Add priv blocks for the File.exists() calls in restore. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@536677 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/monitor/StorageFactoryService.java", "hunks": [ { "added": [ "\t\t\tif(privExists(backupRoot))", "\t\t\t\tif(privExists(bserviceProp))" ], "header": "@@ -574,11 +574,11 @@ final class StorageFactoryService implements PersistentService", "removed": [ "\t\t\tif(backupRoot.exists())", "\t\t\t\tif(bserviceProp.exists())" ] }, { "added": [ " ", " /**", " * Wrap File.exists() in a priv block to avoid Security exceptions", " * @param fileToCheck", "\t * @return true if file exists, false if it does not", "\t * @throws SecurityException", "\t */", " private boolean privExists(final File fileToCheck) throws SecurityException{", " try {", " ", " Boolean exist = (Boolean) AccessController.doPrivileged(", " new PrivilegedExceptionAction()", " {", " public Object run()", " throws SecurityException", " {", " return new Boolean(fileToCheck.exists());", " }", " }); ", " return exist.booleanValue();", " }", " catch( PrivilegedActionException pae)", " {", " throw (SecurityException) pae.getException();", " }", " }", "" ], "header": "@@ -909,4 +909,31 @@ final class StorageFactoryService implements PersistentService", "removed": [] } ] } ]
derby-DERBY-2556-27c7e012
DERBY-2556: Replaced tabs with spaces and updated JavaDoc for method privExists. Patch file: derby-2556-2a_whitespace-javadoc.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@536856 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/monitor/StorageFactoryService.java", "hunks": [ { "added": [ " * Wrap {@link File#exists} in a priv block to avoid security exceptions.", " * <p>", " * This method allows Derby to check if a file exists even when the higher", " * layer code (application code) does not have the required privileges to", " * do so. Note that the Derby code base must be granted the appropriate", " * permissions (typically {@link java.io.FilePermission}). ", " *", " * @param fileToCheck the pathname to check the existence of", " * @return <code>true</code> if file exists, <code>false</code> if not.", " * @throws SecurityException if the required privileges to check if the file", " * exists are missing", " */" ], "header": "@@ -911,11 +911,18 @@ final class StorageFactoryService implements PersistentService", "removed": [ " * Wrap File.exists() in a priv block to avoid Security exceptions", " * @param fileToCheck", "\t * @return true if file exists, false if it does not", "\t * @throws SecurityException", "\t */" ] } ] } ]
derby-DERBY-2556-63b1dd7a
DERBY-2556: Code paths for db restore do not use doPrivileged-calls, causing SecurityException. Modified patch by correcting a few spelling errors of mine and deleted a blank line. Patch file: derby-2556-4a_alternative-patch.diff (M) Patch contributed by Kathey Marsden and Kristian Waagan. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@537735 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/monitor/StorageFactoryService.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.PrivilegedFileOps;" ], "header": "@@ -21,6 +21,7 @@", "removed": [] }, { "added": [ "\t\t\tif(PrivilegedFileOps.exists(backupRoot))", "\t\t\t\tif(PrivilegedFileOps.exists(bserviceProp))" ], "header": "@@ -574,11 +575,11 @@ final class StorageFactoryService implements PersistentService", "removed": [ "\t\t\tif(privExists(backupRoot))", "\t\t\t\tif(privExists(bserviceProp))" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.PrivilegedFileOps;" ], "header": "@@ -86,6 +86,7 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] }, { "added": [ " String[] bfilelist = PrivilegedFileOps.list(backupRoot);" ], "header": "@@ -2513,7 +2514,7 @@ public class BaseDataFileFactory", "removed": [ " String[] bfilelist = backupRoot.list();" ] }, { "added": [ " if(PrivilegedFileOps.exists(bsegdir) &&", " PrivilegedFileOps.isDirectory(bsegdir))" ], "header": "@@ -2523,7 +2524,8 @@ public class BaseDataFileFactory", "removed": [ " if(bsegdir.exists() && bsegdir.isDirectory())" ] } ] } ]
derby-DERBY-2556-65882228
DERBY-2556: Code paths for db restore do not use doPrivileged-calls, causing SecurityException. Removed utility class calling AccessController.doPrivileged, because it is a security hole and it is strongly discouraged by the Java docs. Adjusted the policy files as the code bases changed when the code moved into different classes. Patch file: derby-2556-5b-reworked_fix.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@691576 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/DssTrace.java", "hunks": [ { "added": [ "import java.security.PrivilegedAction;" ], "header": "@@ -24,12 +24,10 @@ import java.io.File;", "removed": [ "import org.apache.derby.iapi.util.PrivilegedFileOps;", "", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/load/Export.java", "hunks": [ { "added": [ "import java.security.AccessController;", "import java.security.PrivilegedAction;" ], "header": "@@ -22,12 +22,11 @@", "removed": [ "import java.sql.ResultSet;", "import java.util.*; ", "import org.apache.derby.iapi.util.PrivilegedFileOps;" ] }, { "added": [ " return fileExists(file);" ], "header": "@@ -126,7 +125,7 @@ public class Export extends ExportAbstract{", "removed": [ " return PrivilegedFileOps.exists(file);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/monitor/StorageFactoryService.java", "hunks": [ { "added": [], "header": "@@ -21,7 +21,6 @@", "removed": [ "import org.apache.derby.iapi.util.PrivilegedFileOps;" ] }, { "added": [ "\t\t\tif (fileExists(backupRoot))", "\t\t\t\tif(fileExists(bserviceProp))" ], "header": "@@ -572,11 +571,11 @@ final class StorageFactoryService implements PersistentService", "removed": [ "\t\t\tif(PrivilegedFileOps.exists(backupRoot))", "\t\t\t\tif(PrivilegedFileOps.exists(bserviceProp))" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java", "hunks": [ { "added": [], "header": "@@ -22,7 +22,6 @@", "removed": [ "import org.apache.derby.iapi.reference.SQLState;" ] }, { "added": [], "header": "@@ -35,7 +34,6 @@ import org.apache.derby.impl.store.raw.data.RecordId;", "removed": [ "import org.apache.derby.iapi.services.info.ProductGenusNames;" ] }, { "added": [], "header": "@@ -50,7 +48,6 @@ import org.apache.derby.iapi.services.monitor.Monitor;", "removed": [ "import org.apache.derby.iapi.services.io.FormatIdUtil;" ] }, { "added": [], "header": "@@ -67,7 +64,6 @@ import org.apache.derby.iapi.store.raw.ContainerKey;", "removed": [ "import org.apache.derby.iapi.store.raw.RecordHandle;" ] }, { "added": [], "header": "@@ -86,7 +82,6 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [ "import org.apache.derby.iapi.util.PrivilegedFileOps;" ] }, { "added": [], "header": "@@ -95,8 +90,6 @@ import java.util.Hashtable;", "removed": [ "import java.io.FilePermission;", "import java.io.OutputStream;" ] }, { "added": [ " // Root dir of backup db", " final File backupRoot = new java.io.File(backupPath);\t\t", "", " String[] bfilelist = (String[])AccessController.doPrivileged(", " new PrivilegedAction() {", " public Object run() {", " return backupRoot.list();", " }", " });" ], "header": "@@ -2469,16 +2462,21 @@ public class BaseDataFileFactory", "removed": [ " File bsegdir; //segment directory in the backup", " File backupRoot = new java.io.File(backupPath);\t//root dir of backup db", "\t\t", " String[] bfilelist = PrivilegedFileOps.list(backupRoot);" ] }, { "added": [ " // Segment directory in the backup", " final File bsegdir = new File(backupRoot , bfilelist[i]);", " boolean bsegdirExists = ((Boolean)", " AccessController.doPrivileged(", " new PrivilegedAction() {", " public Object run() {", " return new Boolean(bsegdir.exists());", " }", " })).booleanValue();", " if (bsegdirExists) {", " // Make sure the file object points at a directory.", " boolean isDirectory = ((Boolean)", " AccessController.doPrivileged(", " new PrivilegedAction() {", " public Object run() {", " return new Boolean(bsegdir.isDirectory());", " }", " 
})).booleanValue();", " if (isDirectory) {", " segmentexist = true;", " break;", " }" ], "header": "@@ -2487,12 +2485,28 @@ public class BaseDataFileFactory", "removed": [ " bsegdir = new File(backupRoot , bfilelist[i]);", " if(PrivilegedFileOps.exists(bsegdir) &&", " PrivilegedFileOps.isDirectory(bsegdir))", " {", " segmentexist = true;", " break;" ] } ] } ]
derby-DERBY-2557-57cb9986
DERBY-2557: This commit renames the getInstanceUsingFormatIdAndCollationType method on DVF to getNull. It also changes the implementation of that method so that it bypasses the InstanceGetter. This implementation is much simpler than the old one because there is no InstanceGetter involved. Lastly, the code associated with getting a DVD from a format id is moved out of DTSClassInfo into a static method on DataValueFactoryImpl. This static method will be called by DTSClassInfo. The changes made in the patch are as follows: 1) DataValueFactory: Changed the name of the new interface from getInstanceUsingFormatIdAndCollationType to getNull. This interface now returns a DVD rather than an Object. The functionality of the interface remains the same but the implementation has changed. 2) DataValueFactoryImpl: a) Removed the class level array and the code associated with InstanceGetter. b) Added a new static method called getNullDVDWithUCS_BASICcollation(int formatId). This static method handles all the format ids associated with DVDs, with the exception of Decimals. The class to be returned for Decimals depends on what VM is being used. This dependency on the VM is handled by getNullDecimal defined on DVF. But since getNullDecimal is not a static method, it can't be called by the static method getNullDVDWithUCS_BASICcollation. I could go the path of defining getNullDecimal as static, but that would require changes in the NumericTypeCompiler.nullMethodName method. Also, all the other getNullXXX methods on DVF are non-static. So, the code for returning the right DVD for Decimal is not in getNullDVDWithUCS_BASICcollation; rather it is in the calling method, getNull. For other format ids associated with DVDs, getNull will check if the DVD is of type StringDataValue and the collation type is territory based, and if so, it will return ((StringDataValue)returnDVD).getValue(getCharacterCollator(collationType)); 3) DTSClassInfo: This class now calls the static method in DVF to get the DVDs. But if the format id is not for a DVD, then it checks if it needs to return a TypeId. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@530546 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataValueFactoryImpl.java", "hunks": [ { "added": [], "header": "@@ -79,12 +79,6 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl", "removed": [ " \t/** ", " \t * For performance purposes, cache InstanceGetters for various formatid", " \t * as we get them in getInstanceUsingFormatIdAndCollationType method.", " \t */ ", " \tprivate InstanceGetter[] instanceGettersForFormatIds;", "" ] }, { "added": [ " * @see DataValueFactory#getNull(int, int)", " public DataValueDescriptor getNull(int formatId, int collationType) ", " throws StandardException {", "", " \t//For StoredFormatIds.SQL_DECIMAL_ID, different implementations are ", " \t//required for different VMs. getNullDecimal method is not static and ", " \t//hence can't be called in the static getNullDVDWithUCS_BASICcollation", " \t//method in this class. That is why StoredFormatIds.SQL_DECIMAL_ID is ", " \t//getting handled here.", " \tif (formatId == StoredFormatIds.SQL_DECIMAL_ID)", " \t\treturn getNullDecimal(null);", "\t\telse {", "\t\t\tDataValueDescriptor returnDVD = ", "\t\t\t\tDataValueFactoryImpl.getNullDVDWithUCS_BASICcollation(formatId);", "\t\t\t//If we are dealing with default collation, then we have got the", "\t\t\t//right DVD already. Just return it.", "\t\t\tif (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC)", "\t\t\t\treturn returnDVD;\t\t\t", "\t\t\t//If we are dealing with territory based collation and returnDVD is ", "\t\t\t//of type StringDataValue, then we need to return a StringDataValue ", "\t\t\t//with territory based collation.", "\t\t\tif (returnDVD instanceof StringDataValue) ", "\t\t\t\treturn ((StringDataValue)returnDVD).getValue(getCharacterCollator(collationType));", "\t\t\telse", "\t\t\t\treturn returnDVD;\t\t\t", " }", " ", " /**", " * This method will return a DVD based on the formatId. It doesn't take", " * into account the collation that should be associated with collation", " * sensitive DVDs, which are all the character type DVDs. Such DVDs ", " * returned from this method have default UCS_BASIC collation associated", " * with them. If collation associated should be terriotry based, then that", " * needs to be handled by the caller of this method. An example of such ", " * code in the caller can be seen in DataValueFactory.getNull method.", " * ", " * Another thing to note is this method does not deal with format id", " * associated with decimal. This is because different implementation are", " * required for different VMs. This is again something that needs to be", " * handled by the caller. 
An example of such code in the caller can be ", " * seen in DataValueFactory.getNull method.", " * ", " * @param formatId Return a DVD based on the format id", " * @return DataValueDescriptor with default collation of UCS_BASIC ", " */", " public static DataValueDescriptor getNullDVDWithUCS_BASICcollation(int formatId){", " switch (formatId) {", " /* Wrappers */", " case StoredFormatIds.SQL_BIT_ID: return new SQLBit();", " case StoredFormatIds.SQL_BOOLEAN_ID: return new SQLBoolean();", " case StoredFormatIds.SQL_CHAR_ID: return new SQLChar();", " case StoredFormatIds.SQL_DATE_ID: return new SQLDate();", " case StoredFormatIds.SQL_DOUBLE_ID: return new SQLDouble();", " case StoredFormatIds.SQL_INTEGER_ID: return new SQLInteger();", " case StoredFormatIds.SQL_LONGINT_ID: return new SQLLongint();", " case StoredFormatIds.SQL_NATIONAL_CHAR_ID: return new SQLNationalChar();", " case StoredFormatIds.SQL_NATIONAL_LONGVARCHAR_ID: return new SQLNationalLongvarchar();", " case StoredFormatIds.SQL_NATIONAL_VARCHAR_ID: return new SQLNationalVarchar();", " case StoredFormatIds.SQL_REAL_ID: return new SQLReal();", " case StoredFormatIds.SQL_REF_ID: return new SQLRef();", " case StoredFormatIds.SQL_SMALLINT_ID: return new SQLSmallint();", " case StoredFormatIds.SQL_TIME_ID: return new SQLTime();", " case StoredFormatIds.SQL_TIMESTAMP_ID: return new SQLTimestamp();", " case StoredFormatIds.SQL_TINYINT_ID: return new SQLTinyint();", " case StoredFormatIds.SQL_VARCHAR_ID: return new SQLVarchar();", " case StoredFormatIds.SQL_LONGVARCHAR_ID: return new SQLLongvarchar();", " case StoredFormatIds.SQL_VARBIT_ID: return new SQLVarbit();", " case StoredFormatIds.SQL_LONGVARBIT_ID: return new SQLLongVarbit();", " case StoredFormatIds.SQL_USERTYPE_ID_V3: return new UserType();", " case StoredFormatIds.SQL_BLOB_ID: return new SQLBlob();", " case StoredFormatIds.SQL_CLOB_ID: return new SQLClob();", " case StoredFormatIds.SQL_NCLOB_ID: return new SQLNClob();", " case StoredFormatIds.XML_ID: return new XML();", " default:return null;", " }" ], "header": "@@ -1117,96 +1111,83 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl", "removed": [ " * @see DataValueFactory#getInstanceUsingFormatIdAndCollationType(int, int)", " public Object getInstanceUsingFormatIdAndCollationType(", " \t\tint formatId, int collationType) throws StandardException {", "\t\tString className;", "\t\tint fmtIdPositionInInstanceGetterArray;", "\t\tInstanceGetter instanceGetter;", "", "\t\ttry {", "\t\t\tfmtIdPositionInInstanceGetterArray = ", "\t\t\t\tformatId - StoredFormatIds.MIN_TWO_BYTE_FORMAT_ID;", "\t\t\t//If this is the first time this method is getting called, then", "\t\t\t//instanceGettersForFormatIds will be null. If so, allocate it.", "\t\t\tif (instanceGettersForFormatIds == null) {", "\t\t\t\tinstanceGettersForFormatIds = new InstanceGetter[RegisteredFormatIds.TwoByte.length];", "\t\t\t}", "\t\t\t//Check if we have already called this method for the passed format", "\t\t\t//id. ", "\t\t\tinstanceGetter = ", "\t\t\t\tinstanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray];", "\t\t\t//If following if is true, then this method has already been called", "\t\t\t//for the passed format id. 
We can just use the cached InstanceGetter", "\t\t\t//from instanceGettersForFormatIds", "\t\t\tif (instanceGetter != null) {", "\t\t\t\t//Get the object from the InstanceGetter", "\t\t\t\tObject returnObject = instanceGetter.getNewInstance();", "\t\t\t\t//If we are dealing with default collation, then we have ", "\t\t\t\t//got the right DVD already. Just return it.", "\t\t\t\tif (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC)", "\t\t\t\t\treturn returnObject;", "\t\t\t\t//If we are dealing with territory based collation and ", "\t\t\t\t//the object is of type StringDataValue, then we need to ", "\t\t\t\t//return a StringDataValue with territory based collation.", "\t\t\t\tif (returnObject instanceof StringDataValue) ", "\t\t\t\t\treturn ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType));", "\t\t\t}", "\t\t\t//This is the first time this method has been called for the passed", "\t\t\t//format id and hence it's InstanceGetter is not in ", "\t\t\t//instanceGettersForFormatIds. Get the InstanceGetter's name for", "\t\t\t//this format id from RegisteredFormatIds", "\t\t\tclassName = RegisteredFormatIds.TwoByte[fmtIdPositionInInstanceGetterArray];", "\t\t} catch (ArrayIndexOutOfBoundsException aioobe) {", "\t\t\tclassName = null;", "\t\t\tfmtIdPositionInInstanceGetterArray = 0;", "\t\t} catch (Exception ite) {", "\t\t\tthrow StandardException.newException(SQLState.REGISTERED_CLASS_INSTANCE_ERROR,", "\t\t\t\t\tite, new Integer(formatId), \"XX\" /*ci.getClassName()*/);", "\t\t}", "", "\t\tif (className != null) {", "\t\t\tThrowable t;", "\t\t\ttry {", "\t\t\t\tClass clazz = Class.forName(className);", "\t\t\t\t// See if the InstanceGetter class for this format id is a ", "\t\t\t\t//FormatableInstanceGetter", "\t\t\t\tif (FormatableInstanceGetter.class.isAssignableFrom(clazz)) {", "\t\t\t\t\tFormatableInstanceGetter tfig = (FormatableInstanceGetter) clazz.newInstance();", "\t\t\t\t\ttfig.setFormatId(formatId);", "\t\t\t\t\t//Cache this InstanceGetter in instanceGettersForFormatIds", "\t\t\t\t\tinstanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = tfig;", "\t\t\t\t\t//Get the object from the InstanceGetter", "\t\t\t\t\tObject returnObject = tfig.getNewInstance();", "\t\t\t\t\t//If we are dealing with default collation, then we have ", "\t\t\t\t\t//got the right DVD already. 
Just return it.", "\t\t\t\t\tif (collationType == StringDataValue.COLLATION_TYPE_UCS_BASIC)", "\t\t\t\t\t\treturn returnObject;", "\t\t\t\t\t//If we are dealing with territory based collation and ", "\t\t\t\t\t//the object is of type StringDataValue, then we need to ", "\t\t\t\t\t//return a StringDataValue with territory based collation.", "\t\t\t\t\tif (returnObject instanceof StringDataValue) ", "\t\t\t\t\t\treturn ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType));", "\t\t\t\t}", "\t\t\t\t//InstanceGetter is not of the type FormatableInstanceGetter", "\t\t\t\tinstanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray] = new ClassInfo(clazz);", "\t\t\t\treturn instanceGettersForFormatIds[fmtIdPositionInInstanceGetterArray].getNewInstance();", "\t\t\t} catch (ClassNotFoundException cnfe) {", "\t\t\t\tt = cnfe;", "\t\t\t} catch (IllegalAccessException iae) {", "\t\t\t\tt = iae;", "\t\t\t} catch (InstantiationException ie) {", "\t\t\t\tt = ie;", "\t\t\t} catch (LinkageError le) {", "\t\t\t\tt = le;", "\t\t\t} catch (java.lang.reflect.InvocationTargetException ite) {", "\t\t\t\tt = ite;", "\t\t\t}", "\t\t\tthrow StandardException.newException(SQLState.REGISTERED_CLASS_LINAKGE_ERROR,", "\t\t\t\tt, FormatIdUtil.formatIdToString(formatId), className);", "\t\tthrow StandardException.newException(SQLState.REGISTERED_CLASS_NONE, FormatIdUtil.formatIdToString(formatId)); \t" ] } ] } ]
derby-DERBY-2557-d19a66fe
The previous patch for DERBY-2557 (revision 529697) had a bug in it. After getting a collation sensitive DVD, the new method on DVF (getInstanceUsingFormatIdAndCollationType) was still returning the DVD with UCS_BASIC collation. This patch fixes that problem. More info on this can be found in DERBY-2557. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@529866 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataValueFactoryImpl.java", "hunks": [ { "added": [ "\t\t\t\t//return a StringDataValue with territory based collation.", "\t\t\t\t\treturn ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType));" ], "header": "@@ -1149,10 +1149,9 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl", "removed": [ "\t\t\t\t//create a StringDataValue with territory based collation.", "\t\t\t\t\t((StringDataValue)returnObject).getValue(getCharacterCollator(collationType));", "\t\t\t\treturn returnObject;" ] }, { "added": [ "\t\t\t\t\t//return a StringDataValue with territory based collation.", "\t\t\t\t\t\treturn ((StringDataValue)returnObject).getValue(getCharacterCollator(collationType));" ], "header": "@@ -1186,10 +1185,9 @@ abstract class DataValueFactoryImpl implements DataValueFactory, ModuleControl", "removed": [ "\t\t\t\t\t//create a StringDataValue with territory based collation.", "\t\t\t\t\t\t((StringDataValue)returnObject).getValue(getCharacterCollator(collationType));", "\t\t\t\t\t\treturn returnObject;" ] } ] } ]
derby-DERBY-2559-0b884ead
DERBY-2559: recreating a datasource using javax.naming.Reference from a ClientDataSource40 fails. A followup patch to avoid throwing an exception when the input arguments are not as expected. The point is to allow another factory to try recreating the object, which will not happen if an exception is thrown. Null will now be returned for null objects and objects whose class does not start with "org.apache.derby.jdbc.Client". Patch file: derby-2559-2a-defenses.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628647 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/ClientDataSourceFactory.java", "hunks": [ { "added": [ " Object ds = null;", " if (refObj instanceof javax.naming.Reference) {", " javax.naming.Reference ref = (javax.naming.Reference) refObj;", "", " // See if this object belongs to Derby.", " String className = ref.getClassName();", " if (className != null &&", " className.startsWith(\"org.apache.derby.jdbc.Client\")) {", " // Create the proper data source object shell.", " ds = Class.forName(className).newInstance();", "", " // Fill in the data source object shell with values from the", " // jndi reference.", " ClientDataSourceFactory.setBeanProperties(ds, ref);", " }", " }" ], "header": "@@ -73,14 +73,22 @@ public class ClientDataSourceFactory implements javax.naming.spi.ObjectFactory {", "removed": [ " javax.naming.Reference ref = (javax.naming.Reference) refObj;", "", " // Create the proper data source object shell.", " Object ds = Class.forName(ref.getClassName()).newInstance();", "", " // Fill in the data source object shell with values from the jndi reference.", " ClientDataSourceFactory.setBeanProperties(ds, ref);", "" ] } ] } ]
derby-DERBY-2559-a8d30d78
DERBY-2559: recreating a datasource using javax.naming.Reference from a ClientDataSource40 fails. Incremental commit, which enables the factory to create all Derby (client) datasources. However, some mechanisms to avoid throwing exceptions must be added, for instance if a null object is passed. The test that was disabled has been enabled again. Patch file: derby-2559-1a.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@617492 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/ClientDataSourceFactory.java", "hunks": [ { "added": [], "header": "@@ -26,11 +26,6 @@ import java.util.Enumeration;", "removed": [ "import org.apache.derby.jdbc.ClientBaseDataSource;", "", "import org.apache.derby.jdbc.ClientConnectionPoolDataSource;", "import org.apache.derby.jdbc.ClientDataSource;", "import org.apache.derby.jdbc.ClientXADataSource;" ] } ] } ]
derby-DERBY-2559-af931719
DERBY-2559: recreating a datasource using javax.naming.Reference from a ClientDataSource40 fails. Documentation / JavaDoc changes (contents and formatting). Patch file: derby-2559-3b-documentation.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@628654 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/ClientDataSourceFactory.java", "hunks": [ { "added": [ " * The data source factory for Derby client driver data sources.", " * <p>", " * This factory reconstructs a Derby data source object when it is retrieved", " * from JNDI. References are needed since many naming services don't have the", " * ability to store Java objects in their serialized form. When a data source", " * object is bound in this type of naming service the", " * {@link javax.naming.Reference} for that object is actually stored by the", " * JNDI implementation, not the data source object itself.", " * <p>", " * A JNDI administrator is responsible for making sure that both the object", " * factory and data source implementation classes provided by a JDBC driver", " * vendor are accessible to the JNDI service provider at runtime.", " * <p>", " * An object factory implements the {@link javax.naming.spi.ObjectFactory}", " * interface. This interface contains a single method, {@code getObjectInstance}", " * which is called by a JNDI service provider to reconstruct an object when that", " * object is retrieved from JNDI. A JDBC driver vendor should provide an object", " * factory as part of their JDBC 2.0 product.", " * @see org.apache.derby.jdbc.ClientConnectionPoolDataSource", " * @see org.apache.derby.jdbc.ClientXADataSource", " public ClientDataSourceFactory() {}", " * Reconstructs a Derby client-driver data source object from a JNDI data", " * source reference.", " * <p>", " * The {@code getObjectInstance} method is passed a reference that", " * corresponds to the object being retrieved as its first parameter. The", " * other parameters are optional in the case of JDBC data source objects.", " * The object factory should use the information contained in the reference", " * to reconstruct the data source. If for some reason, a data source", " * object cannot be reconstructed from the reference, a value of", " * {@code null} may be returned. This allows other object factories that may", " * be registered in JNDI to be tried. If an exception is thrown then no", " * other object factories are tried.", " * @param refObj the possibly {@code null} object containing location or", " * reference information that can be used in creating an object", " * @param name the name of this object relative to {@code nameContext}, or", " * {@code null} if no name is specified", " * @param nameContext context relative to which the name parameter is", " * specified, or {@code null} if name is relative to the default", " * initial context.", " * @param environment possibly {@code null} environment that is used in", " * creating the object.", " * @return Object created, or {@code null} if no attempt to create the", " * object is made.", " * @throws Exception if recreating the object fails" ], "header": "@@ -28,46 +28,60 @@ import javax.naming.RefAddr;", "removed": [ " * The data source factory currrently for ClientDataSource only. This factory will support XA and pooling-enabled data", " * sources in the future.", " * <p/>", " * This factory reconstructs a DERBY simple data source object when it is retrieved from JNDI. References are needed", " * since many naming services don't have the ability to store Java objects in their serialized form. 
When a data source", " * object is bound in this type of naming service the Reference for that object is actually stored by the JNDI", " * implementation, not the data source object itself.", " * <p/>", " * A JNDI administrator is responsible for making sure that both the object factory and data source implementation", " * classes provided by a JDBC driver vendor are accessible to the JNDI service provider at runtime.", " * <p/>", " * An object factory implements the javax.naming.spi.ObjectFactory interface. This interface contains a single method,", " * getObjectInstance, which is called by a JNDI service provider to reconstruct an object when that object is retrieved", " * from JNDI. A JDBC driver vendor should provide an object factory as part of their JDBC 2.0 product.", " public ClientDataSourceFactory() {", " }", " * Reconstructs a ClientDataSource object from a JNDI data source reference.", " * <p/>", " * The getObjectInstance() method is passed a reference that corresponds to the object being retrieved as its first", " * parameter. The other parameters are optional in the case of JDBC data source objects. The object factory should", " * use the information contained in the reference to reconstruct the data source. If for some reason, a data source", " * object cannot be reconstructed from the reference, a value of null may be returned. This allows other object", " * factories that may be registered in JNDI to be tried. If an exception is thrown then no other object factories", " * are tried.", " * @param refObj The possibly null object containing location or reference information that can be used in", " * creating an object.", " * @param name The name of this object relative to nameContext, or null if no name is specified.", " * @param nameContext Context relative to which the name parameter is specified, or null if name is relative to the", " * default initial context.", " * @param environment Possibly null environment that is used in creating the object.", " * @return object created; null if an object cannot be created" ] }, { "added": [ " /**" ], "header": "@@ -101,7 +115,7 @@ public class ClientDataSourceFactory implements javax.naming.spi.ObjectFactory {", "removed": [ " /*" ] } ] } ]
derby-DERBY-2560-fc87a439
DERBY-2560 Missing asserts in ProcedureInTriggerTest. Fix the missing asserts and enable the test for the network server. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@529801 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2564-6d8a24d9
DERBY-2564 Calls to Thread.interrupt need privilege blocks. Changed calls to Thread.interrupt to occur in privilege blocks. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@595861 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/NetworkServerControlImpl.java", "hunks": [ { "added": [ "import java.security.PrivilegedAction;" ], "header": "@@ -42,6 +42,7 @@ import javax.net.ssl.SSLServerSocketFactory;", "removed": [] }, { "added": [ "\t\tfinal ClientThread clientThread =\t " ], "header": "@@ -733,7 +734,7 @@ public final class NetworkServerControlImpl {", "removed": [ "\t\tClientThread clientThread =\t " ] }, { "added": [ " ", " AccessController.doPrivileged(", " new PrivilegedAction() {", " public Object run() {", " // Need to interrupt the memcheck thread if it is sleeping.", " if (mc != null)", " mc.interrupt();", "", " //interrupt client thread", " clientThread.interrupt();", "", " return null;", " }", " });", "\t\t" ], "header": "@@ -756,13 +757,21 @@ public final class NetworkServerControlImpl {", "removed": [ "\t\t// Need to interrupt the memcheck thread if it is sleeping.", "\t\tif (mc != null)", "\t\t\tmc.interrupt();", "", "\t\t//interrupt client thread", "\t\tclientThread.interrupt();", "" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/jdbc/DRDAServerStarter.java", "hunks": [ { "added": [ "import java.security.PrivilegedAction;" ], "header": "@@ -34,6 +34,7 @@ import java.lang.reflect.Method;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/services/context/ContextService.java", "hunks": [ { "added": [ "import java.security.AccessController;", "import java.security.PrivilegedAction;" ], "header": "@@ -25,6 +25,8 @@ import org.apache.derby.iapi.services.monitor.Monitor;", "removed": [] }, { "added": [ " final Thread fActive = active;", " {", " AccessController.doPrivileged(", " new PrivilegedAction() {", " public Object run() {", " fActive.interrupt();", " return null;", " }", " });", " }" ], "header": "@@ -556,8 +558,17 @@ public final class ContextService //OLD extends Hashtable", "removed": [ "\t\t\t\t\tactive.interrupt();" ] } ] } ]
derby-DERBY-2569-2bb198ae
DERBY-2569 Basically, this commit moves the logic for the comparable method from the various TypeCompiler implementations into DTD. This is because we now also need collation information to determine whether two types are comparable, and that information is not available to the TypeCompilers. In addition, of course, all the callers of TypeCompiler's comparable method now call DTD.comparable. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@530910 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataTypeDescriptor.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.JDBC30Translation;" ], "header": "@@ -42,6 +42,7 @@ import org.apache.derby.iapi.services.loader.ClassFactory;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/BinaryComparisonOperatorNode.java", "hunks": [ { "added": [ " boolean cmp = leftOperand.getTypeServices().comparable(", " \t\trightOperand.getTypeServices(),", "\t\t\t\tforEquals,", "\t\t\t\tgetClassFactory());" ], "header": "@@ -240,9 +240,10 @@ public abstract class BinaryComparisonOperatorNode extends BinaryOperatorNode", "removed": [ " boolean cmp = leftOperand.getTypeCompiler().comparable(rightType,", " forEquals,", " getClassFactory());" ] }, { "added": [ "\t\t\tif (leftOperand.getTypeServices().comparable(leftOperand.getTypeServices(),", "\t\t\t\t\tfalse, getClassFactory()))" ], "header": "@@ -431,8 +432,8 @@ public abstract class BinaryComparisonOperatorNode extends BinaryOperatorNode", "removed": [ "\t\t\tif (leftOperand.getTypeCompiler().comparable(leftTypeId, false,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t getClassFactory()))" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ValueNodeList.java", "hunks": [ { "added": [], "header": "@@ -291,10 +291,8 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [ "\t\tTypeCompiler leftTC;", "\t\tleftTC = leftOperand.getTypeCompiler();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/XMLTypeCompiler.java", "hunks": [ { "added": [], "header": "@@ -39,24 +39,6 @@ import org.apache.derby.iapi.reference.ClassName;", "removed": [ " /**", " * Tell whether this type (XML) can be compared to the given type.", " * Says SQL/XML[2003] spec:", " *", " * 4.2.2 XML comparison and assignment", " * \"XML values are not comparable.\"", " *", " * @param otherType The TypeId of the other type.", " */", " public boolean comparable(TypeId otherType,", " boolean forEquals,", " ClassFactory cs)", " {", " // An XML value cannot be compared to any type--", " // not even to other XML values.", " return false;", " }", "" ] } ] } ]
derby-DERBY-2569-2d32bab3
This is more of a code cleanup for DERBY-2569. Rather than using a switch statement to check for individual format ids, this patch uses the isXXX methods wherever applicable to determine whether two types are comparable. This is because with format id checking, when, say, another numeric type is added in the future, we would have to modify the switch statement to look for that new numeric type's format id. If we check via the isXXX() methods instead, the format id checks do not have to be maintained, because the new numeric type will fall under the existing isXXX() umbrella. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@532627 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataTypeDescriptor.java", "hunks": [ { "added": [ "\t\t// Long types cannot be compared. ", "\t\t// XML types also fall in this window", "\t\t// Says SQL/XML[2003] spec:", "\t\t// 4.2.2 XML comparison and assignment", "\t\t// \"XML values are not comparable.\"", "\t\t// An XML value cannot be compared to any type--", "\t\t// not even to other XML values." ], "header": "@@ -850,7 +850,13 @@ public final class DataTypeDescriptor implements TypeDescriptor, Formatable", "removed": [ "\t\t// Long types cannot be compared" ] }, { "added": [ "", " \t//Numeric types are comparable to numeric types, boolean types and to ", "\t\t//comparable user types", "\t\tif (typeId.isNumericTypeId())", " \t\treturn (compareWithTypeID.isNumericTypeId() || ", "\t\t//CHAR, VARCHAR and LONGVARCHAR are comparable to strings, boolean, ", "\t\t//DATE/TIME/TIMESTAMP and to comparable user types", "\t\tif (typeId.isStringTypeId()) {", " \t\tif((compareWithTypeID.isDateTimeTimeStampTypeID() ||", " \t\t\t\tcompareWithTypeID.isBooleanTypeId()))", " \t\t\t\treturn true;", " \t\t//If both the types are string types, then we need to make sure", " \t\t//they have the same collation set on them", " \t\tif (compareWithTypeID.isStringTypeId() && typeId.isStringTypeId()) {", " \t\t\tif (getCollationDerivation() == compareWithDTD.getCollationDerivation() &&", " \t\t\t\t\tgetCollationType() == compareWithDTD.getCollationType())", " \t\t\t\treturn true;//collation matches", " \t\t\telse", " \t\t\t\treturn false;//collation does not match", " \t\t} else", " \t\t\treturn false;//can't be compared\t\t\t", "\t\t}", "", " \t//Are comparable to other bit types and comparable user types", "\t\tif (typeId.isBitTypeId()) ", " \treturn (compareWithTypeID.isBitTypeId()); ", "\t\t", "\t\t//Booleans are comparable to Boolean, string, numeric and to ", "\t\t//comparable user types ", "\t\tif (typeId.isBooleanTypeId())", " \t\treturn (compareWithTypeID.getSQLTypeName().equals(typeId.getSQLTypeName()) ||", " \t\t\t\tcompareWithTypeID.isStringTypeId() ||", " \t\t\t\tcompareWithTypeID.isNumericTypeId()); ", "", "\t\t//Dates are comparable to dates, strings and to comparable", "\t\t//user types.", "\t\tif (typeId.getJDBCTypeId() == Types.DATE)", " \t\tif (compareWithJDBCTypeId == Types.DATE || ", " \t\t\t\tcompareWithTypeID.isStringTypeId())", " \t\t\treturn true;", " \t\telse", " \t\t\treturn false;", "", " \t//Times are comparable to times, strings and to comparable", "\t\t//user types.", "\t\tif (typeId.getJDBCTypeId() == Types.TIME)", " \t\tif (compareWithJDBCTypeId == Types.TIME || ", " \t\t\t\tcompareWithTypeID.isStringTypeId())", " \t\t\treturn true;", " \t\telse", " \t\t\treturn false;", "", " \t//Timestamps are comparable to timestamps, strings and to", "\t\t//comparable user types.", "\t\tif (typeId.getJDBCTypeId() == Types.TIMESTAMP)", " \t\tif (compareWithJDBCTypeId == Types.TIMESTAMP || ", " \t\t\t\tcompareWithTypeID.isStringTypeId())", " \t\t\treturn true;", " \t\telse", " \t\t\treturn false;", "", "\t\t//User types are comparable to other user types only if", "\t\t//(for now) they are the same type and are being used to", "\t\t//implement some JDBC type. 
This is sufficient for", "\t\t//date/time types; it may be generalized later for e.g.", "\t\t//comparison of any user type with one of its subtypes.", "\t\tif (typeId.isUserDefinedTypeId() || typeId.getJDBCTypeId() == Types.OTHER) {", " \tif (forEquals)", " \t\treturn true;", " \ttry {", " \t", " \t\tClass thisClass = cf.getClassInspector().getClass(", "\t\t\t\ttypeId.getCorrespondingJavaTypeName());", " \t\t", " \t\treturn java.lang.Comparable.class.isAssignableFrom(thisClass);", " \t} catch (ClassNotFoundException cnfe) {", " \t\treturn false;", " \t}\t\t\t" ], "header": "@@ -864,122 +870,86 @@ public final class DataTypeDescriptor implements TypeDescriptor, Formatable", "removed": [ "\t\t", " switch (typeId.getJDBCTypeId()) ", "\t\t{", " case Types.DECIMAL:", " case Types.BIGINT:", " case Types.DOUBLE:", " case Types.INTEGER:", " case Types.NUMERIC:", " case Types.REAL:", " case Types.SMALLINT:", " case Types.TINYINT:", " \t// Numeric types are comparable to numeric types, boolean ", " \t//types and to comparable user types", " \t\treturn (compareWithTypeID.isNumericTypeId() ||", " case Types.CHAR:", " case Types.LONGVARCHAR:", " case Types.VARCHAR:", " \t\t// CHAR and VARCHAR are comparable to strings, boolean, ", " \t// DATE/TIME/TIMESTAMP and to comparable user types", " \t\tif((compareWithTypeID.isDateTimeTimeStampTypeID() ||", " \t\t\t\tcompareWithTypeID.isBooleanTypeId()))", " \t\t\t\treturn true;", " \t\t//If both the types are string types, then we need to make", " \t\t//sure they have the same collation set on them", " \t\tif (compareWithTypeID.isStringTypeId() && typeId.isStringTypeId()) {", " \t\t\tif (getCollationDerivation() == compareWithDTD.getCollationDerivation() &&", " \t\t\t\t\tgetCollationType() == compareWithDTD.getCollationType())", " \t\t\t\treturn true;", " \t\t\telse", " \t\t\t\treturn false;", " \t\t} else", " \t\t\treturn false;", " \t\t", "", " case Types.BIT:", " case JDBC30Translation.SQL_TYPES_BOOLEAN:", " \t\t/* Are comparable to Boolean, string, numeric and to ", " \t\t * comparable user types */", " \t\treturn (compareWithTypeID.getSQLTypeName().equals(typeId.getSQLTypeName()) ||", " \t\t\t\tcompareWithTypeID.isStringTypeId() ||", " \t\t\t\tcompareWithTypeID.isNumericTypeId()); ", "", " case Types.DATE:", " \t/*", " \t * Dates are comparable to dates, strings and to comparable", " \t * user types.", " \t */", " \t\tif (compareWithJDBCTypeId == Types.DATE || ", " \t\t\t\tcompareWithTypeID.isStringTypeId())", " \t\t\treturn true;", " \t\telse", " \t\t\treturn false;", "", " case Types.TIME:", " \t/*", " \t * Times are comparable to times, strings and to comparable", " \t * user types.", " \t */", " \t\tif (compareWithJDBCTypeId == Types.TIME || ", " \t\t\t\tcompareWithTypeID.isStringTypeId())", " \t\t\treturn true;", " \t\telse", " \t\t\treturn false;", "", " case Types.TIMESTAMP:", " \t/*", " \t * Timestamps are comparable to timestamps, strings and to ", " \t * comparable user types.", " \t */", " \t\tif (compareWithJDBCTypeId == Types.TIMESTAMP || ", " \t\t\t\tcompareWithTypeID.isStringTypeId())", " \t\t\treturn true;", " \t\telse", " \t\t\treturn false;", "", " case Types.BINARY:", " case Types.LONGVARBINARY:", " case Types.VARBINARY:", " \t//Are comparable to other bit types and comparable user types", " \treturn (compareWithTypeID.isBitTypeId()); ", "", " case org.apache.derby.iapi.reference.JDBC20Translation.SQL_TYPES_JAVA_OBJECT:", " case Types.OTHER:", " \t/*", " \t * User types are comparable to other user types only if", " \t * (for now) they 
are the same type and are being used to", " \t * implement some JDBC type. This is sufficient for", " \t * date/time types; it may be generalized later for e.g.", " \t * comparison of any user type with one of its subtypes.", " \t */", " \tif (forEquals)", " \t\treturn true;", " \ttry {", " \t", " \t\tClass thisClass = cf.getClassInspector().getClass(", " \t\t\t\t\ttypeId.getCorrespondingJavaTypeName());", " \t\t", " \t\treturn java.lang.Comparable.class.isAssignableFrom(thisClass);", " \t} catch (ClassNotFoundException cnfe) {", " \t\treturn false;", " \t}", "", " case StoredFormatIds.XML_TYPE_ID:", " /*", " * Tell whether this type (XML) can be compared to the given type.", " * Says SQL/XML[2003] spec:", " *", " * 4.2.2 XML comparison and assignment", " * \"XML values are not comparable.\"", " * ", " * An XML value cannot be compared to any type--", " * not even to other XML values.", " */ ", " \treturn false;" ] } ] } ]
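An illustrative sketch of the cleanup idea with a hypothetical TypeCategory class: callers test a category predicate once instead of enumerating JDBC type codes, so a future numeric type only has to make the predicate return true.

    import java.sql.Types;

    class TypeCategory {
        final int jdbcType;
        TypeCategory(int jdbcType) { this.jdbcType = jdbcType; }

        // The one place that still knows the individual type codes.
        boolean isNumeric() {
            switch (jdbcType) {
                case Types.TINYINT: case Types.SMALLINT: case Types.INTEGER:
                case Types.BIGINT:  case Types.REAL:     case Types.DOUBLE:
                case Types.DECIMAL: case Types.NUMERIC:
                    return true;
                default:
                    return false;
            }
        }

        // Callers rely on the predicate instead of maintaining their own switch.
        boolean comparableAsNumbers(TypeCategory other) {
            return isNumeric() && other.isNumeric();
        }
    }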
derby-DERBY-2570-286c99fc
DERBY-2570: Improve error reporting for release note generator--now it buffers up problems detected while parsing the individual notes. This is better than failing on the first bad note. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@547969 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/ReleaseNotesGenerator.java", "hunks": [ { "added": [ " private ArrayList _errors;" ], "header": "@@ -216,6 +216,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " _errors = new ArrayList();" ], "header": "@@ -233,6 +234,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " public void addError( String message )", " {", " _errors.add( message );", " }", "" ], "header": "@@ -240,6 +242,11 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ "", " public String[] getErrors()", " {", " String[] squeezed = new String[ _errors.size() ];", "", " _errors.toArray( squeezed );", "", " return squeezed;", " }" ], "header": "@@ -254,6 +261,15 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " printErrors( gs );" ], "header": "@@ -366,6 +382,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " ", "", " // skip this note if we were unable to read it", " if ( releaseNote == null )", " {", " gs.addMissingReleaseNote( issue );", " continue;", " }", " ", " String summary = getReleaseNoteSummary( gs, issue, releaseNote );", " try {" ], "header": "@@ -620,17 +637,25 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " String summary = getReleaseNoteSummary( issue, releaseNote );", " if ( releaseNote != null )", " {" ] }, { "added": [ " catch (Throwable t)", " gs.addError( formatError( \"Could not read required sections out of issue \" + issue.getKey(), t ) );" ], "header": "@@ -638,9 +663,9 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " else", " gs.addMissingReleaseNote( issue );" ] }, { "added": [ "", " gs.addError( formatError( \"Unable to read or parse release note for \" + issue.getKey(), e ) );", "", " return null;" ], "header": "@@ -669,9 +694,10 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " ", " throw new BuildException", " ( \"Unable to read or parse release note for \" + issue.getKey() + \": \" + e.toString(), e );" ] }, { "added": [ " private String getReleaseNoteSummary( GeneratorState gs, JiraIssue issue, Document releaseNote )" ], "header": "@@ -682,7 +708,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " private String getReleaseNoteSummary( JiraIssue issue, Document releaseNote )" ] }, { "added": [ " gs.addError( formatError( \"Badly formatted summary for \" + issue.getKey(), t ) );", " return \"Unreadable summary line\";" ], "header": "@@ -704,8 +730,8 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " throw new BuildException", " ( \"Badly formatted summary for \" + issue.getKey() + \": \" + t.toString(), t );" ] }, { "added": [ " // Print errors", " * Print missing release notes" ], "header": "@@ -786,13 +812,13 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " // Print missing Release Notes", " * Build the Overview section." 
] }, { "added": [ " println( \"The following JIRA issues still need release notes or the release notes provided are unreadable:\" );" ], "header": "@@ -803,7 +829,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " println( \"The following JIRA issues still need release notes:\" );" ] }, { "added": [ " /**", " * <p>", " * Print errors.", " * </p>", " */", " private void printErrors( GeneratorState gs )", " throws Exception", " {", " String[] errors = gs.getErrors();", " int count = errors.length;", "", " if ( count > 0 )", " {", " println( \"The following other errors occurred:\" );", "", " for ( int i = 0; i < count; i++ )", " {", " String error = errors[ i ];", " ", " println( \"\\n\" + error );", " }", " }", " }", " " ], "header": "@@ -814,6 +840,30 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " /**", " * <p>", " * Format an error for later reporting.", " * </p>", " */", " private String formatError( String text, Throwable t )", " {", " text = text + \": \" + t.toString() + \"\\n\" + stringifyStackTrace( t );", "", " return text;", " }", "", " /**", " * <p>", " * Print a stack trace as a string.", " * </p>", " */", " private String stringifyStackTrace( Throwable t )", " {", " StringWriter sw = new StringWriter();", " PrintWriter pw = new PrintWriter( sw, true );", "", " t.printStackTrace( pw );", " pw.flush();", " sw.flush();", "", " return sw.toString(); ", " }", "", " ////////////////////////////////////////////////////////" ], "header": "@@ -1330,7 +1380,36 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " ////////////////////////////////////////////////////////" ] } ] } ]
derby-DERBY-2570-2981b190
DERBY-2570: Create a lint tool which people can use to satisfy themselves that their release notes can be digested by the release note generator. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@547982 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/ReleaseNotesGenerator.java", "hunks": [ { "added": [ " private ReleaseNoteReader _releaseNoteReader;" ], "header": "@@ -214,6 +214,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ "", " _releaseNoteReader = new ReleaseNoteReader( documentBuilder );" ], "header": "@@ -235,6 +236,8 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " public ReleaseNoteReader getReleaseNoteReader() { return _releaseNoteReader; }" ], "header": "@@ -252,6 +255,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " ReleaseNoteReader releaseNoteReader = gs.getReleaseNoteReader();" ], "header": "@@ -621,6 +625,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " Document releaseNote = null;", "", " try {", " releaseNote = getReleaseNote( gs, issue );", " }", " catch (Throwable t)", " {", " gs.addError( formatError( \"Unable to read or parse release note for \" + issue.getKey(), t ) );", " }" ], "header": "@@ -638,7 +643,15 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " Document releaseNote = getReleaseNote( gs, issue );" ] }, { "added": [ " String summary = null;", "", " try {", " summary = releaseNoteReader.getReleaseNoteSummary( releaseNote );", " }", " catch (Throwable t)", " {", " gs.addError( formatError( \"Badly formatted summary for \" + issue.getKey(), t ) );", " summary = \"Unreadable summary line\";", " }", " " ], "header": "@@ -648,7 +661,17 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " String summary = getReleaseNoteSummary( gs, issue, releaseNote );" ] }, { "added": [ " Element details = releaseNoteReader.getReleaseNoteDetails( releaseNote );" ], "header": "@@ -656,8 +679,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " Element root = releaseNote.getDocumentElement();", " Element details = getFirstChild( root, BODY );" ] }, { "added": [ " " ], "header": "@@ -671,6 +693,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " URL url = null;", " InputStream is = null;", " ", " url = new URL( issue.getReleaseNoteAddress() );", " is = url.openStream();", " catch (Throwable t)", " processThrowable( t );", " Document doc = gs.getReleaseNoteReader().getReleaseNote( is );", " return doc;", " else { return null; }" ], "header": "@@ -681,60 +704,24 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " URL url = new URL( issue.getReleaseNoteAddress() );", "", " InputStream is = url.openStream();", " Document doc = gs.getDocumentBuilder().parse( is );", "", " is.close();", "", " return doc;", " catch (Exception e)", " processThrowable( e );", "", " gs.addError( formatError( \"Unable to read or parse release note for \" + issue.getKey(), e ) );", "", " }", " else { return null; }", " }", " /**", " * <p>", " * Get the summary for a release note", " * </p>", " */", " private String getReleaseNoteSummary( GeneratorState gs, JiraIssue issue, Document releaseNote )", " throws Exception", " {", " if ( releaseNote != null )", " {", " //", " // The release note has the following structure:", " //", " // <h4>Summary of Change</h4>", " // <p>", " // Summary text", " // </p>", " //", " try {", " Element root = releaseNote.getDocumentElement();", " Element summaryParagraph = getFirstChild( root, PARAGRAPH );", " String summaryText = squeezeText( summaryParagraph );", " return summaryText;", " }", " catch (Throwable t)", " {", " gs.addError( 
formatError( \"Badly formatted summary for \" + issue.getKey(), t ) );", " return \"Unreadable summary line\";", " }", " else { return \"???\"; }" ] } ] } ]
derby-DERBY-2570-3455c754
DERBY-2570: adding the JiraConnector. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@547965 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/JiraConnector.java", "hunks": [ { "added": [ "/*", "", " Derby - Class org.apache.derbyBuild.JiraConnector", "", " Licensed to the Apache Software Foundation (ASF) under one or more", " contributor license agreements. See the NOTICE file distributed with", " this work for additional information regarding copyright ownership.", " The ASF licenses this file to You under the Apache License, Version 2.0", " (the \"License\"); you may not use this file except in compliance with", " the License. You may obtain a copy of the License at", "", " http://www.apache.org/licenses/LICENSE-2.0", "", " Unless required by applicable law or agreed to in writing, software", " distributed under the License is distributed on an \"AS IS\" BASIS,", " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", " See the License for the specific language governing permissions and", " limitations under the License.", "", " */", "", "package org.apache.derbyBuild;", "", "/*", " * class that will build an xml file based on the xml jira reports. ", " */", "", "import java.io.BufferedReader;", "import java.io.File;", "import java.io.FileInputStream;", "import java.io.FileWriter;", "import java.io.IOException;", "import java.io.InputStream;", "import java.io.InputStreamReader;", "import java.net.MalformedURLException;", "import java.net.URL;", "import java.net.URLConnection;", "", "public class JiraConnector {", "\tstatic String filePath=null;", "\t// filenames of files to be created.", "\tstatic String fixedBugsListFileName=\"fixedBugsList.xml\";", "\tstatic String releaseNotesListFileName=\"ReleaseNotesList.xml\";", "", "\tpublic static String jira_releaseNotesSource =", "\t\t\"http://issues.apache.org/jira/secure/IssueNavigator.jspa?view=rss\" +", "\t\t\"&pid=10594&sorter/field=issuekey&sorter/order=DESC&tempMax=50\" +", "\t\t\"&reset=true&decorator=none&customfield_12310090=\" +", "\t\t\"Existing+Application+Impact&customfield_12310090=Release+Note+Needed\";", "", "\tpublic static String jira_fixedBugsSource =", "\t\t\"http://issues.apache.org/jira/sr/jira.issueviews:\" +", "\t\t\"searchrequest-xml/temp/SearchRequest.xml?&pid=10594&resolution=1&\" +", "\t\t\"fixVersion=10.3.0.0&sorter/field=issuekey&sorter/order=DESC&\" +", "\t\t\"tempMax=1000&reset=true&decorator=none\";", "", "\t// other urls to some cute jira reports in xml.", "\t// all ", "\t// (warning: avoid using this - it's tough on apache infrastructure.", "\t// public static String jira_report=\"http://issues.apache.org/jira/secure/IssueNavigator.jspa?view=rss&pid=10594&sorter/field=issuekey&sorter/order=DESC&tempMax=5000&reset=true&decorator=none\";", "\t// all open bugs", "\t// public static String jira_BUG_OPEN=\"http://issues.apache.org/jira/secure/IssueNavigator.jspa?view=rss&pid=10594&types=1&statusIds=1&sorter/field=issuekey&sorter/order=DESC&tempMax=1000&reset=true&decorator=none\";", "\t// one bug - the following two would be joined with in the middle a string like ", "\t// 'DERBY-225' to make the http to get xml for 1 bug.", "\t// public static String onejirabegin=\"http://issues.apache.org/jira/browse/\"; // ", "\t// public static String onejiraend=\"?decorator=none&view=rss\";", "", "\tpublic static void main(String[] args) {", "\t\ttry{", "\t\t\trefreshJiraIssues(fixedBugsListFileName, jira_fixedBugsSource);", "\t\t\trefreshJiraIssues(releaseNotesListFileName, jira_releaseNotesSource);", "\t\t}catch(IOException ex){", "\t\t\tex.printStackTrace();", "\t\t}catch(Exception exe){", 
"\t\t\texe.printStackTrace();", "\t\t}", "\t}", "", "\tpublic static void refreshJiraIssues(String fileName, String stream) throws Exception {", "\t\tString sep = System.getProperty(\"file.separator\");", "\t\tfilePath = System.getProperty(\"user.dir\") + sep + fileName; ", "\t\tgetXMLStreamAndFile(fileName, stream);", "\t}", "", "\tprivate static void getXMLStreamAndFile(String fileName, String stream) throws IOException {", "\t\tFileInputStream fins=null;", "\t\tString XMLurl = stream;", "\t\ttry{", "\t\t\tBufferedReader in = ", "\t\t\t\tnew BufferedReader( new InputStreamReader(getXMLStream(XMLurl)));", "\t\t\tString inputLine;", "\t\t\tFile file=new File(filePath);", "\t\t\tFileWriter fw=new FileWriter(file);", "\t\t\twhile ((inputLine = in.readLine()) != null)", "\t\t\t{", "\t\t\t\tfw.write(inputLine);", "\t\t\t}", "\t\t\tin.close();", "\t\t\tfw.close();", "\t\t\tSystem.out.println(\"A new Jira XML File created: \"+file);", "\t\t}catch(IOException e){", "\t\t\t//e.printStackTrace();", "\t\t\tthrow e;", "\t\t}", "\t}", "", "\tpublic static InputStream getXMLStream(String XMLurl) throws MalformedURLException, IOException {", "\t\tURL url= new URL(XMLurl);", "\t\tSystem.out.println(\"Accessing url: \" + XMLurl);", "\t\tURLConnection jiraSite = url.openConnection();", "\t\treturn jiraSite.getInputStream();", "\t}", "}" ], "header": "@@ -0,0 +1,113 @@", "removed": [] } ] } ]
derby-DERBY-2570-3a7cee20
DERBY-2570: In the summary section, treat <releaseID/> and <previousReleaseID/> as variables which the ReleaseNotesGenerator substitutes with appropriate values. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@548432 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/ReleaseNotesGenerator.java", "hunks": [ { "added": [ " replaceVariables( gs );" ], "header": "@@ -381,6 +381,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [] }, { "added": [ " //////////////////////////////////", " //", " // REPLACE VARIABLES", " //", " //////////////////////////////////", "", " /**", " * <p>", " * Replace the known parameters with their corresponding text values.", " * </p>", " */", " private void replaceVariables( GeneratorState gs )", " throws Exception", " {", " Document pamphlet = gs.getPamphlet();", "", " replaceTag( pamphlet, SUM_RELEASE_ID, getReleaseID( gs ) );", " replaceTag( pamphlet, SUM_PREVIOUS_RELEASE_ID, getPreviousReleaseID( gs ) );", " }", " " ], "header": "@@ -774,6 +775,26 @@ public class ReleaseNotesGenerator extends Task", "removed": [] } ] } ]
derby-DERBY-2570-4c14d1b0
DERBY-2570: Construct nice link text from summary paragraphs which have interesting markup in them. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@548781 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/ReleaseNoteReader.java", "hunks": [ { "added": [ " Element summary = me.getReleaseNoteSummary( doc );" ], "header": "@@ -105,7 +105,7 @@ public class ReleaseNoteReader", "removed": [ " String summary = me.getReleaseNoteSummary( doc );" ] }, { "added": [ " * Get the summary paragraph for a release note", " public Element getReleaseNoteSummary( Document releaseNote )" ], "header": "@@ -136,10 +136,10 @@ public class ReleaseNoteReader", "removed": [ " * Get the summary for a release note", " public String getReleaseNoteSummary( Document releaseNote )" ] } ] }, { "file": "java/build/org/apache/derbyBuild/ReleaseNotesGenerator.java", "hunks": [ { "added": [ " createSection( body, MAIN_SECTION_LEVEL, toc, OVERVIEW_SECTION, OVERVIEW_SECTION );", " createSection( body, MAIN_SECTION_LEVEL, toc, NEW_FEATURES_SECTION, NEW_FEATURES_SECTION );", " createSection( body, MAIN_SECTION_LEVEL, toc, BUG_FIXES_SECTION, BUG_FIXES_SECTION );", " createSection( body, MAIN_SECTION_LEVEL, toc, ISSUES_SECTION, ISSUES_SECTION );", " createSection( body, MAIN_SECTION_LEVEL, toc, BUILD_ENVIRONMENT_SECTION, BUILD_ENVIRONMENT_SECTION );" ], "header": "@@ -477,11 +477,11 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " createSection( body, MAIN_SECTION_LEVEL, toc, OVERVIEW_SECTION, null );", " createSection( body, MAIN_SECTION_LEVEL, toc, NEW_FEATURES_SECTION, null );", " createSection( body, MAIN_SECTION_LEVEL, toc, BUG_FIXES_SECTION, null );", " createSection( body, MAIN_SECTION_LEVEL, toc, ISSUES_SECTION, null );", " createSection( body, MAIN_SECTION_LEVEL, toc, BUILD_ENVIRONMENT_SECTION, null );" ] }, { "added": [ " Node summaryText = null;", " summaryText = releaseNoteReader.getReleaseNoteSummary( releaseNote );", " summaryText = pamphlet.createTextNode( \"Unreadable summary line\" );", " Element paragraph = pamphlet.createElement( PARAGRAPH );", "", " paragraph.appendChild( pamphlet.createTextNode( key + \": \") );", " cloneChildren( summaryText, paragraph );", " Element issueSection = createSection( issuesSection, ISSUE_DETAIL_LEVEL, toc, key, paragraph );" ], "header": "@@ -662,22 +662,25 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " String summary = null;", " summary = releaseNoteReader.getReleaseNoteSummary( releaseNote );", " summary = \"Unreadable summary line\";", " String tocEntry = key + \": \" + summary;", " Element issueSection = createSection( issuesSection, ISSUE_DETAIL_LEVEL, toc, key, tocEntry );" ] }, { "added": [ " Text textNode = doc.createTextNode( tocEntry );", "", " return createSection( parent, sectionLevel, toc, sectionName, textNode );", " }", " ", " /**", " * <p>", " * Create a section at the end of a parent element and link to it from a", " * table of contents.", " * </p>", " */", " private Element createSection( Element parent, int sectionLevel, Element toc, String sectionName, Node visibleText )", " throws Exception", " {", " Document doc = parent.getOwnerDocument();", " Element link = createLocalLink( doc, sectionName, visibleText );" ], "header": "@@ -947,7 +950,22 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " Element link = createLocalLink( doc, sectionName, tocEntry );" ] }, { "added": [ " Text textNode = doc.createTextNode( text );", "", " return createLocalLink( doc, anchor, textNode );", " }", " ", " /**", " * <p>", " * Create a standard link to a local label.", " * </p>", " */", " private Element createLocalLink( Document doc, String anchor, Node visibleText )", " throws 
Exception", " {", " return createLink( doc, \"#\" + anchor, visibleText );" ], "header": "@@ -1002,9 +1020,20 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " if ( text == null ) { text = anchor; }", " ", " return createLink( doc, \"#\" + anchor, text );" ] }, { "added": [ " return createLink( doc, label, textNode );", " }", " ", " /**", " * <p>", " * Create a hotlink.", " * </p>", " */", " private Element createLink( Document doc, String label, Node visibleText )", " throws Exception", " {", " Element hotlink = doc.createElement( ANCHOR );", "", " hotlink.appendChild( visibleText );" ], "header": "@@ -1015,11 +1044,23 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " Element hotlink = doc.createElement( ANCHOR );", " hotlink.appendChild( textNode );" ] }, { "added": [ " private void cloneChildren( Node source, Node target )" ], "header": "@@ -1267,7 +1308,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " private void cloneChildren( Element source, Element target )" ] } ] } ]
derby-DERBY-2570-994a2959
DERBY-2570: Make the release note generator report notes which have badly formatted summaries. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@547957 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/ReleaseNotesGenerator.java", "hunks": [ { "added": [ " String summary = getReleaseNoteSummary( issue, releaseNote );" ], "header": "@@ -622,7 +622,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " String summary = getReleaseNoteSummary( releaseNote );" ] }, { "added": [ " private String getReleaseNoteSummary( JiraIssue issue, Document releaseNote )" ], "header": "@@ -682,7 +682,7 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " private String getReleaseNoteSummary( Document releaseNote )" ] }, { "added": [ " try {", " Element root = releaseNote.getDocumentElement();", " Element summaryParagraph = getFirstChild( root, PARAGRAPH );", " String summaryText = squeezeText( summaryParagraph );", " return summaryText;", " }", " catch (Throwable t)", " {", " throw new BuildException", " ( \"Badly formatted summary for \" + issue.getKey() + \": \" + t.toString(), t );", " }" ], "header": "@@ -695,11 +695,18 @@ public class ReleaseNotesGenerator extends Task", "removed": [ " Element root = releaseNote.getDocumentElement();", " Element summaryParagraph = getFirstChild( root, PARAGRAPH );", " String summaryText = squeezeText( summaryParagraph );", " return summaryText;" ] } ] } ]
derby-DERBY-2579-1b482f44
DERBY-2579: AssertFailure class should use JDK's built-in chaining of exceptions git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@531822 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/shared/org/apache/derby/shared/common/sanity/AssertFailure.java", "hunks": [ { "added": [], "header": "@@ -34,8 +34,6 @@ import java.io.*;", "removed": [ "\tprivate Throwable nestedException;", "" ] }, { "added": [ "\t\tsuper(message, nestedError);" ], "header": "@@ -51,8 +49,7 @@ public class AssertFailure extends RuntimeException", "removed": [ "\t\tsuper(message);", "\t\tnestedException = nestedError;" ] }, { "added": [], "header": "@@ -62,20 +59,4 @@ public class AssertFailure extends RuntimeException", "removed": [ "", "\tpublic void printStackTrace() {", "\t\tsuper.printStackTrace();", "\t\tif (nestedException != null)", "\t\t\tnestedException.printStackTrace();", "\t}", "\tpublic void printStackTrace(PrintStream s) {", "\t\tsuper.printStackTrace(s);", "\t\tif (nestedException != null)", "\t\t\tnestedException.printStackTrace(s);", "\t}", "\tpublic void printStackTrace(PrintWriter s) {", "\t\tsuper.printStackTrace(s);", "\t\tif (nestedException != null)", "\t\t\tnestedException.printStackTrace(s);", "\t}" ] } ] } ]
derby-DERBY-258-4d80643f
Fix DERBY-258 - Changes to parse the signature in the context of the parameter types of the function or procedure. Ensures the declared signature's Java types are mappable to the parameter types, as specified in the SQL2003 spec, part 13. Checks for various invalid formats as well. git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@190508 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/MethodCallNode.java", "hunks": [ { "added": [ "import java.util.StringTokenizer;" ], "header": "@@ -56,6 +56,7 @@ import org.apache.derby.catalog.types.RoutineAliasInfo;", "removed": [] }, { "added": [ "\t\t", "\t\tString[]\t\tparmTypeNames;" ], "header": "@@ -671,7 +672,8 @@ public abstract class MethodCallNode extends JavaValueNode", "removed": [ "\t\tString[]\t\tparmTypeNames = getObjectSignature();" ] }, { "added": [ "", " \tint signatureOffset = methodName.indexOf('(');", " \t", " if (signatureOffset != -1) {", " \tparmTypeNames = parseValidateSignature(methodName, signatureOffset, hasDynamicResultSets);", " methodName = methodName.substring(0, signatureOffset);", " ", " // If the signature is specified then Derby resolves to exactly", " // that method. Setting this flag to false disables the method", " // resolution from automatically optionally repeating the last", " // parameter as needed.", " hasDynamicResultSets = false;", " \t ", " \tparmTypeNames = getObjectSignature();", " }", " try", " { \t" ], "header": "@@ -680,16 +682,27 @@ public abstract class MethodCallNode extends JavaValueNode", "removed": [ " try", " {", " if (methodName.indexOf('(') != -1) {", " method = classInspector.findPublicMethod(javaClassName, methodName, staticMethod);", " methodName = method.getName();", " /* First try with built-in types and mappings */" ] }, { "added": [ " // Also if the DDL specified a signature, then no alternate resolution", " if (signatureOffset == -1 && routineInfo == null) {" ], "header": "@@ -702,7 +715,8 @@ public abstract class MethodCallNode extends JavaValueNode", "removed": [ " if (routineInfo == null) {" ] }, { "added": [], "header": "@@ -720,7 +734,6 @@ public abstract class MethodCallNode extends JavaValueNode", "removed": [ " }" ] }, { "added": [ "\t\t\tif (ClassInspector.primitiveType(methodParameter))" ], "header": "@@ -829,7 +842,7 @@ public abstract class MethodCallNode extends JavaValueNode", "removed": [ "\t\t\tif (classInspector.primitiveType(methodParameter))" ] } ] } ]
derby-DERBY-2581-e7d2a423
DERBY-2581: Callers of SanityManager.THROWASSERT should chain the exceptions when possible git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@531827 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/BinaryDecimal.java", "hunks": [ { "added": [ "\t\t\t\tSanityManager.THROWASSERT(se);" ], "header": "@@ -167,7 +167,7 @@ abstract class BinaryDecimal extends NumberDataType", "removed": [ "\t\t\t\tSanityManager.THROWASSERT(se.toString());" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLBinary.java", "hunks": [ { "added": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception\", se);" ], "header": "@@ -532,7 +532,7 @@ abstract class SQLBinary", "removed": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception \" + se);" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/SQLChar.java", "hunks": [ { "added": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception\", se);" ], "header": "@@ -940,7 +940,7 @@ readingLoop:", "removed": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception \" + se);" ] }, { "added": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception\", se);" ], "header": "@@ -2490,7 +2490,7 @@ readingLoop:", "removed": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception \" + se);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/bytecode/BCJava.java", "hunks": [ { "added": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception\", se);" ], "header": "@@ -254,7 +254,7 @@ public class BCJava implements JavaFactory, CacheableFactory, ModuleControl {", "removed": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception \" + se, se);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/bytecode/GClass.java", "hunks": [ { "added": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unable to write .class file\", e);" ], "header": "@@ -88,7 +88,7 @@ public abstract class GClass implements ClassBuilder {", "removed": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unable to write .class file\");" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/jce/JCECipherProvider.java", "hunks": [ { "added": [ "\t\t\t\tSanityManager.THROWASSERT(ise);" ], "header": "@@ -256,7 +256,7 @@ class JCECipherProvider implements CipherProvider", "removed": [ "\t\t\t\tSanityManager.THROWASSERT(\"Illegal state exception\");" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/GenericPreparedStatement.java", "hunks": [ { "added": [ "\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception\", se);" ], "header": "@@ -558,9 +558,7 @@ recompileOutOfDatePlan:", "removed": [ "\t\t\t\tse.printStackTrace(System.out);", "\t\t\t\tSanityManager.THROWASSERT(", "\t\t\t\t\t\"Unexpected exception - \" + se);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/BinaryRelationalOperatorNode.java", "hunks": [ { "added": [ " \"find base table number for column reference check:\", se);" ], "header": "@@ -412,8 +412,7 @@ public class BinaryRelationalOperatorNode", "removed": [ " \"find base table number for column reference check:\\n\" +", " se.getMessage());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/NewInvocationNode.java", "hunks": [ { "added": [ "\t\t\t\t\tSanityManager.THROWASSERT(\"Unexpected exception\", cnfe);" ], "header": "@@ -223,8 +223,7 @@ public class NewInvocationNode extends MethodCallNode", "removed": [ "\t\t\t\t\tSanityManager.THROWASSERT(", "\t\t\t\t\t\t\"Unexpected exception - \" + cnfe);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/btree/ControlRow.java", "hunks": [ { "added": [ " \"setLeftSibling got an exception: \" +", " \"to new value \" + this.leftSiblingPageNumber, se);" ], "header": "@@ -503,10 +503,10 @@ 
public abstract class ControlRow implements AuxObject, TypedFormat", "removed": [ " \"setLeftSibling got an exception: \" + se +", " \"to new value \" + this.leftSiblingPageNumber);" ] }, { "added": [ " \"setRightSibling got an exception: \" +", " \"to new value \" + this.rightSiblingPageNumber, se);" ], "header": "@@ -560,10 +560,10 @@ public abstract class ControlRow implements AuxObject, TypedFormat", "removed": [ " \"setRightSibling got an exception: \" + se +", " \"to new value \" + this.rightSiblingPageNumber);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/xact/TransactionTableEntry.java", "hunks": [ { "added": [ " \"TransactionTableEntry cloneable but throws \" +", "\t\t\t\t\t\"CloneNotSupportedException\", e);" ], "header": "@@ -551,7 +551,8 @@ public class TransactionTableEntry implements Formatable, TransactionInfo, Clone", "removed": [ " \"TransactionTableEntry cloneable but throws CloneNotSupportedException \" + e);" ] } ] } ]
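The call-site pattern applied throughout the diff above is to pass the caught exception as a separate argument so it becomes the assertion failure's cause, instead of flattening it into the message string. A generic, runnable illustration of the difference (this is not Derby's SanityManager, just the same idea):

// A tiny stand-in for an assertion helper that chains the offending Throwable.
public class ChainedAssert {
    static void throwAssert(String message, Throwable cause) {
        RuntimeException failure = new RuntimeException(message);
        failure.initCause(cause);   // the full nested stack trace is preserved
        throw failure;
    }

    public static void main(String[] args) {
        try {
            Integer.parseInt("not a number");
        } catch (NumberFormatException nfe) {
            // before: throwAssert("Unexpected exception " + nfe) lost the nested trace
            throwAssert("Unexpected exception", nfe);
        }
    }
}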
derby-DERBY-2583-869152fa
DERBY-2335 Made changes such that rather than adding a new method in BaseTypeCompiler to push the DVD on the stack at code generation time, we use the existing method that accomplishes the same task in ExpressionClassBuilder. The JUnit tests have run fine with these changes, and the stack trace experienced by Army in DERBY-2335 is resolved by this change. The reason for the stack trace was that the lifetime of a BaseTypeCompiler is longer than a single class generation, and I was trying to hold a reference to a declared method from MethodBuilder.describeMethod across the generated classes. This discussion can be found at http://www.nabble.com/DERBY-1478-subtask-DERBY-2583---need-help-in-debugging-stack-trace-thrown-during-code-generation-p10611184.html git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@538325 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/compile/TypeCompiler.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.compile.ExpressionClassBuilder;" ], "header": "@@ -25,6 +25,7 @@ import org.apache.derby.iapi.services.loader.ClassFactory;", "removed": [] }, { "added": [ "\t * @param eb The ExpressionClassBuilder for the class we're generating" ], "header": "@@ -178,6 +179,7 @@ public interface TypeCompiler", "removed": [] }, { "added": [ "\tvoid generateNull(ExpressionClassBuilder eb,", "\t\t\tMethodBuilder mb, int collationType, String className);" ], "header": "@@ -185,7 +187,8 @@ public interface TypeCompiler", "removed": [ "\tvoid generateNull(MethodBuilder mb, int collationType, String className);" ] }, { "added": [ "\t * @param eb The ExpressionClassBuilder for the class we're generating", "\t * @param mb\tThe method to put the expression in" ], "header": "@@ -200,7 +203,8 @@ public interface TypeCompiler", "removed": [ "\t * @param eb\tThe method to put the expression in" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/BaseTypeCompiler.java", "hunks": [ { "added": [ "\t/** @see TypeCompiler#generateNull(ExpressionClassBuilder, MethodBuilder, int, String)*/", "\tpublic void generateNull(ExpressionClassBuilder e,", "\t\t\tMethodBuilder mb, int collationType, " ], "header": "@@ -93,8 +93,9 @@ abstract class BaseTypeCompiler implements TypeCompiler", "removed": [ "\t/** @see TypeCompiler#generateNull(MethodBuilder, int, String) */", "\tpublic void generateNull(MethodBuilder mb, int collationType, " ] }, { "added": [ "\t/** @see TypeCompiler#generateDataValue(ExpressionClassBuilder, MethodBuilder, int, String, LocalField) */", "\tpublic void generateDataValue(ExpressionClassBuilder eb,", "\t\t\tMethodBuilder mb, int collationType," ], "header": "@@ -103,8 +104,9 @@ abstract class BaseTypeCompiler implements TypeCompiler", "removed": [ "\t/** @see TypeCompiler#generateDataValue(MethodBuilder, int, String, LocalField) */", "\tpublic void generateDataValue(MethodBuilder mb, int collationType," ] }, { "added": [ "\t *", "\t * @param eb The ExpressionClassBuilder for the class we're generating" ], "header": "@@ -153,7 +155,8 @@ abstract class BaseTypeCompiler implements TypeCompiler", "removed": [ "\t * " ] }, { "added": [ "\tprotected void generateCollationSensitiveDataValue(", "\t\t\tExpressionClassBuilder eb,", "\t\t\tMethodBuilder mb, " ], "header": "@@ -161,7 +164,9 @@ abstract class BaseTypeCompiler implements TypeCompiler", "removed": [ "\tprotected void generateCollationSensitiveDataValue(MethodBuilder mb, " ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ExpressionClassBuilder.java", "hunks": [ { "added": [ "public abstract\tclass ExpressionClassBuilder implements ExpressionClassBuilderInterface" ], "header": "@@ -80,7 +80,7 @@ import java.io.Serializable;", "removed": [ "abstract\tclass ExpressionClassBuilder implements ExpressionClassBuilderInterface" ] }, { "added": [ "\t\ttc.generateNull(this, mb, collationType, getBaseClassName());" ], "header": "@@ -870,7 +870,7 @@ abstract\tclass ExpressionClassBuilder implements ExpressionClassBuilderInterface", "removed": [ "\t\ttc.generateNull(mb, collationType, getBaseClassName());" ] }, { "added": [ "\t\ttc.generateNull(this, mb, collationType, getBaseClassName());" ], "header": "@@ -883,7 +883,7 @@ abstract\tclass ExpressionClassBuilder implements ExpressionClassBuilderInterface", "removed": [ "\t\ttc.generateNull(mb, collationType, getBaseClassName());" ] } ] }, { "file": 
"java/engine/org/apache/derby/impl/sql/compile/NumericTypeCompiler.java", "hunks": [ { "added": [ "\t/** @see TypeCompiler#generateDataValue(ExpressionClassBuilder, MethodBuilder, int, String, LocalField) */", "\tpublic void generateDataValue(ExpressionClassBuilder eb,", "\t\t\tMethodBuilder mb, int collationType," ], "header": "@@ -530,7 +530,9 @@ public final class NumericTypeCompiler extends BaseTypeCompiler", "removed": [ "\tpublic void generateDataValue(MethodBuilder mb, int collationType," ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/UserDefinedTypeCompiler.java", "hunks": [ { "added": [ "\t/** @see TypeCompiler#generateDataValue(ExpressionClassBuilder, MethodBuilder, int, String, LocalField) */", "\tpublic void generateDataValue(ExpressionClassBuilder eb, MethodBuilder mb, int collationType,", "\t\tsuper.generateDataValue(eb, mb, collationType, className, field);" ], "header": "@@ -118,13 +118,14 @@ public class UserDefinedTypeCompiler extends BaseTypeCompiler", "removed": [ "\tpublic void generateDataValue(MethodBuilder mb, int collationType,", "\t\tsuper.generateDataValue(mb, collationType, className, field);" ] } ] } ]
derby-DERBY-2583-ba7683ca
DERBY-2583 This commit mainly addresses how code generation should account for collation type when generating DVDs for character types. In the past, there was a one-to-one correspondence between a character DTD and the corresponding DVD that got generated for it. Starting with Derby 10.3, a DTD associated with a character type can generate one of 2 different kinds of DVDs, and the DVD chosen will depend on the collation type of the DTD. Note that this applies only to character types. Character types that have a collation of UCS_BASIC associated with them will continue to generate what was generated in Derby 10.2, i.e. SQLChar/SQLVarchar/SQLLongvarchar/SQLClob. But the character types that have a territory-based collation type associated with them will now generate CollatorSQLChar/CollatorSQLVarchar/CollatorSQLLongvarchar/CollatorSQLClob. This dependency of DVD type on collation type will be handled in the ExpressionClassBuilder class and in the TypeCompiler implementations. CastNode, ConstantNode, NotNode, ResultColumn, SpecialFunctionNode, JavaToSQLValueNode, UnaryComparisonOperatorNode, UserTypeConstantNode, CurrentDatetimeOperatorNode, CurrentRowLocationNode, CoalesceFunctionNode, ConcatenationOperatorNode, ResultColumnList: all of these compile-time classes require code generation of DVDs. These classes now need to pass the collation type of the DTD for which a DVD needs to be generated. This collation type will be used to generate code for the correct DVD. The actual changes for generating the correct DVD went into CLOBTypeCompiler and CharTypeCompiler. These 2 classes will first generate a DVD for character types without taking the collation type into consideration. Then they call a new method defined on the base class BaseTypeCompiler, called generateCollationSensitiveDataValue. This new method will check if the collation type is UCS_BASIC and, if yes, it will simply return because we have already generated code for a DVD with collation type of UCS_BASIC. But if the collation type is territory based, then it will generate the additional code of DVDwithUCS_BASIC.getValue(DVF.getCharacterCollator(collationType)); This generated code will make sure that the DVD generated has a territory-based collator associated with it, and the new DVD class will be of type CollatorSQLChar/CollatorSQLVarchar/CollatorSQLLongvarchar/CollatorSQLClob. In order to generate the additional code above, we need to have the DVF on the stack. This pushing of the DVF onto the stack is done by the private method pushDataValueFactory defined on BaseTypeCompiler. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@532082 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/compile/TypeCompiler.java", "hunks": [ { "added": [], "header": "@@ -27,7 +27,6 @@ import org.apache.derby.iapi.services.compiler.MethodBuilder;", "removed": [ "import org.apache.derby.iapi.types.DataValueDescriptor;" ] }, { "added": [ "\t * of the correct type (interfaceName()).", "\t * @param collationType For character DVDs, this will be used to determine", "\t * what Collator should be associated with the DVD which in turn will ", "\t * decide whether to generate CollatorSQLcharDVDs or SQLcharDVDs.", "\t * @param className name of the base class of the activation's hierarchy", "\tvoid generateNull(MethodBuilder mb, int collationType, String className);" ], "header": "@@ -177,13 +176,16 @@ public interface TypeCompiler", "removed": [ "\t of the correct type (interfaceName()).", "\t *", "\tvoid\t\t\tgenerateNull(MethodBuilder mb);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/BaseTypeCompiler.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.StringDataValue;" ], "header": "@@ -27,18 +27,14 @@ import org.apache.derby.iapi.services.loader.ClassFactory;", "removed": [ "import org.apache.derby.iapi.sql.conn.ConnectionUtil;", "import org.apache.derby.iapi.types.BitDataValue;", "import org.apache.derby.iapi.types.DataValueFactory;", "import org.apache.derby.iapi.types.DataValueDescriptor;", "import org.apache.derby.iapi.reference.SQLState;" ] }, { "added": [], "header": "@@ -46,8 +42,6 @@ import org.apache.derby.iapi.services.compiler.MethodBuilder;", "removed": [ "import java.sql.Types;", "" ] }, { "added": [ "\t/** @see TypeCompiler#generateNull(MethodBuilder, int, String) */", "\tpublic void generateNull(MethodBuilder mb, int collationType, ", "\t\t\tString className)" ], "header": "@@ -99,9 +93,9 @@ abstract class BaseTypeCompiler implements TypeCompiler", "removed": [ "\t/** @see TypeCompiler#generateNull */", "", "\tpublic void generateNull(MethodBuilder mb)" ] }, { "added": [ "\t/** @see TypeCompiler#generateDataValue(MethodBuilder, int, String, LocalField) */", "\tpublic void generateDataValue(MethodBuilder mb, int collationType,", "\t\t\tString className, LocalField field)" ], "header": "@@ -109,9 +103,9 @@ abstract class BaseTypeCompiler implements TypeCompiler", "removed": [ "\t/** @see TypeCompiler#generateDataValue */", "\tpublic void generateDataValue(MethodBuilder mb,", "\t\t\t\t\t\t\t\t\t\tLocalField field)" ] }, { "added": [], "header": "@@ -130,7 +124,6 @@ abstract class BaseTypeCompiler implements TypeCompiler", "removed": [ "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CLOBTypeCompiler.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.compiler.LocalField;", "import org.apache.derby.iapi.services.compiler.MethodBuilder;" ], "header": "@@ -21,17 +21,12 @@", "removed": [ "import org.apache.derby.iapi.reference.SQLState;", "", "import org.apache.derby.iapi.error.StandardException;", "import org.apache.derby.iapi.sql.conn.ConnectionUtil;", "", "import org.apache.derby.iapi.types.BitDataValue;", "import org.apache.derby.iapi.types.DataValueFactory;" ] }, { "added": [], "header": "@@ -40,9 +35,6 @@ import org.apache.derby.iapi.sql.compile.TypeCompiler;", "removed": [ "import java.sql.Types;", "import org.apache.derby.iapi.reference.JDBC20Translation;", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CharTypeCompiler.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.compiler.LocalField;", "import 
org.apache.derby.iapi.services.compiler.MethodBuilder;" ], "header": "@@ -25,24 +25,16 @@ import org.apache.derby.iapi.services.loader.ClassFactory;", "removed": [ "import org.apache.derby.iapi.error.StandardException;", "", "import org.apache.derby.iapi.types.StringDataValue;", "import org.apache.derby.iapi.types.DataValueDescriptor;", "import org.apache.derby.iapi.reference.SQLState;", "", "import org.apache.derby.iapi.util.StringUtil;", "", "import java.sql.Types;", "import org.apache.derby.iapi.reference.JDBC20Translation;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ConstantNode.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.compile.ExpressionClassBuilder;", "" ], "header": "@@ -22,28 +22,20 @@", "removed": [ "import org.apache.derby.iapi.types.TypeId;", "import org.apache.derby.iapi.sql.dictionary.DataDictionary;", "import org.apache.derby.impl.sql.compile.ExpressionClassBuilder;", "", "import java.lang.reflect.Modifier;", "", "import java.sql.Date;", "import java.sql.Time;", "import java.sql.Timestamp;", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ExpressionClassBuilder.java", "hunks": [ { "added": [ "\tvoid generateNull(MethodBuilder mb, TypeCompiler tc, int collationType) {", "\t\ttc.generateNull(mb, collationType, getBaseClassName());" ], "header": "@@ -867,10 +867,10 @@ abstract\tclass ExpressionClassBuilder implements ExpressionClassBuilderInterface", "removed": [ "\tvoid generateNull(MethodBuilder mb, TypeCompiler tc) {", "\t\ttc.generateNull(mb);" ] }, { "added": [ "\tvoid generateNullWithExpress(MethodBuilder mb, TypeCompiler tc, ", "\t\t\tint collationType) {", "\t\ttc.generateNull(mb, collationType, getBaseClassName());" ], "header": "@@ -878,11 +878,12 @@ abstract\tclass ExpressionClassBuilder implements ExpressionClassBuilderInterface", "removed": [ "\tvoid generateNullWithExpress(MethodBuilder mb, TypeCompiler tc) {", "\t\ttc.generateNull(mb);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/JavaToSQLValueNode.java", "hunks": [ { "added": [ "\t\t\tacb.generateNullWithExpress(mb, getTypeCompiler(), ", "\t\t\t\t\tgetTypeServices().getCollationType());" ], "header": "@@ -142,7 +142,8 @@ public class JavaToSQLValueNode extends ValueNode", "removed": [ "\t\t\tacb.generateNullWithExpress(mb, getTypeCompiler());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/NumericTypeCompiler.java", "hunks": [ { "added": [ "\tpublic void generateDataValue(MethodBuilder mb, int collationType,", "\t\t\tString className, LocalField field)" ], "header": "@@ -530,8 +530,8 @@ public final class NumericTypeCompiler extends BaseTypeCompiler", "removed": [ "\tpublic void generateDataValue(MethodBuilder mb,", "\t\t\t\t\t\t\t\t\t\tLocalField field)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/UnaryComparisonOperatorNode.java", "hunks": [ { "added": [ "\t\tacb.generateNull(mb, operand.getTypeCompiler(), ", "\t\t\t\toperand.getTypeServices().getCollationType());" ], "header": "@@ -301,7 +301,8 @@ public class UnaryComparisonOperatorNode extends UnaryOperatorNode", "removed": [ "\t\tacb.generateNull(mb, operand.getTypeCompiler());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/UserTypeConstantNode.java", "hunks": [ { "added": [ "\t\t\tacb.generateNull(mb, tc, getTypeServices().getCollationType());" ], "header": "@@ -246,7 +246,7 @@ public class UserTypeConstantNode extends ConstantNode {", "removed": [ "\t\t\tacb.generateNull(mb, tc);" ] }, { "added": [ 
"\t\t\tacb.generateDataValue(mb, tc, getTypeServices().getCollationType(), field);" ], "header": "@@ -289,7 +289,7 @@ public class UserTypeConstantNode extends ConstantNode {", "removed": [ "\t\t\tacb.generateDataValue(mb, tc, field);" ] } ] } ]
derby-DERBY-2584-01c5d899
DERBY-2584: Creating a database with JPOX SchemaTool sometimes gives ArrayIndexOutOfBoundsException when getIndexInfo() is called. Make sure meta-data queries are properly written to the database when the first attempt to compile them fails with a lock timeout. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@536516 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/SPSDescriptor.java", "hunks": [ { "added": [ "\t\t\t\t// DERBY-2584: If the first attempt to compile the query fails,", "\t\t\t\t// we need to reset initiallyCompilable to make sure the", "\t\t\t\t// prepared plan is fully stored to disk. Save the initial", "\t\t\t\t// value here.", "\t\t\t\tfinal boolean compilable = initiallyCompilable;", "" ], "header": "@@ -700,6 +700,12 @@ public class SPSDescriptor extends TupleDescriptor", "removed": [] } ] } ]
derby-DERBY-2594-763e014e
DERBY-2594: Revoking a privilege from an SQL Object should invalidate statements dependent on that object. Patch contributed by Dyre Tjeldvoll. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@536767 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/ViewDescriptor.java", "hunks": [ { "added": [ "", "\t\t\t\t// When REVOKE_PRIVILEGE gets sent to a", "\t\t\t\t// TablePermsDescriptor we must also send", "\t\t\t\t// INTERNAL_RECOMPILE_REQUEST to its Dependents which", "\t\t\t\t// may be GPSs needing re-compilation. But Dependents", "\t\t\t\t// could also be ViewDescriptors, which then also need", "\t\t\t\t// to handle this event.", "\t\t\tcase DependencyManager.INTERNAL_RECOMPILE_REQUEST:" ], "header": "@@ -273,6 +273,14 @@ public final class ViewDescriptor extends TupleDescriptor", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/TablePrivilegeInfo.java", "hunks": [ { "added": [ "\t\t\t\t\tdd.getDependencyManager().invalidateFor", "\t\t\t\t\t\t(tablePermsDesc,", "\t\t\t\t\t\t DependencyManager.REVOKE_PRIVILEGE, lcc);", "", "\t\t\t\t\t// When revoking a privilege from a Table we need to", "\t\t\t\t\t// invalidate all GPSs refering to it. But GPSs aren't", "\t\t\t\t\t// Dependents of TablePermsDescr, but of the", "\t\t\t\t\t// TableDescriptor itself, so we must send", "\t\t\t\t\t// INTERNAL_RECOMPILE_REQUEST to the TableDescriptor's", "\t\t\t\t\t// Dependents.", "\t\t\t\t\tdd.getDependencyManager().invalidateFor", "\t\t\t\t\t\t(td, DependencyManager.INTERNAL_RECOMPILE_REQUEST, lcc);" ], "header": "@@ -254,7 +254,18 @@ public class TablePrivilegeInfo extends PrivilegeInfo", "removed": [ "\t\t\t\t\tdd.getDependencyManager().invalidateFor(tablePermsDesc, DependencyManager.REVOKE_PRIVILEGE, lcc);" ] } ] } ]
derby-DERBY-2595-d18c1af6
DERBY-2595: JUnit tests use getExportedKeys with table name null. Rewrote invalid usage of DatabaseMetaData.getExportedKeys() in JDBC.dropSchema(). Contributed by Jørgen Løland. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@533175 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/JDBC.java", "hunks": [ { "added": [ " ResultSet table_rs = dmd.getTables((String) null, schema, (String) null,", " new String[] {\"TABLE\"});", "", " while (table_rs.next()) {", " String tablename = table_rs.getString(\"TABLE_NAME\");", " rs = dmd.getExportedKeys((String) null, schema, tablename);", " while (rs.next()) {", " short keyPosition = rs.getShort(\"KEY_SEQ\");", " if (keyPosition != 1)", " continue;", " String fkName = rs.getString(\"FK_NAME\");", " // No name, probably can't happen but couldn't drop it anyway.", " if (fkName == null)", " continue;", " String fkSchema = rs.getString(\"FKTABLE_SCHEM\");", " String fkTable = rs.getString(\"FKTABLE_NAME\");", "", " String ddl = \"ALTER TABLE \" +", " JDBC.escape(fkSchema, fkTable) +", " \" DROP FOREIGN KEY \" +", " JDBC.escape(fkName);", " s.executeUpdate(ddl);", " }", " rs.close();", " table_rs.close();" ], "header": "@@ -221,26 +221,32 @@ public class JDBC {", "removed": [ " rs = dmd.getExportedKeys((String) null, schema, (String) null);", " while (rs.next())", " {", " short keyPosition = rs.getShort(\"KEY_SEQ\");", " if (keyPosition != 1)", " continue;", " String fkName = rs.getString(\"FK_NAME\");", " // No name, probably can't happen but couldn't drop it anyway.", " if (fkName == null)", " continue;", " String fkSchema = rs.getString(\"FKTABLE_SCHEM\");", " String fkTable = rs.getString(\"FKTABLE_NAME\");", " ", " String ddl = \"ALTER TABLE \" +", " JDBC.escape(fkSchema, fkTable) +", " \" DROP FOREIGN KEY \" +", " JDBC.escape(fkName);", " s.executeUpdate(ddl);", " rs.close();" ] } ] } ]
derby-DERBY-2599-2364fd8e
DERBY-2599 Committing changes explained by the following paragraphs. DataTypeDescriptor has a method called getDominantType which compares itself with the DTD passed as parameter to getDominantType and creates a new DTD with the dominant type of the 2 DTDs. But while doing so, it was not setting the collation information on the new DTD based on the 2 involved DTDs. I have made changes to the method so that it now sets the correct collation information on the new DTD. The algorithm, as copied from the method javadoc, is as follows: * If dealing with character string types, then make sure to set the * collation info on the dominant type. Following algorithm will be used * for dominant DTD's collation determination. Each of the steps of the * algorithem have been numbered in the comments below and those same * numbers are used in the actual algorithm below so it is easier to * understand and maintain. * * Step 1 * If the DTD for "this" node has the same collation derivation as the * otherDTS, then check if their collation types match too. If the * collation types match too, then DTD for dominant type will get the same * collation derivation and type. * * Step 2 * If the collation derivation for DTD for "this" node and otherDTS do not * match, then check if one of them has the collation derivation of NONE. * If that is the case, then dominant DTD will get the collation type and * derivation of DTD whose collation derivation is not NONE. * * Step 3 * If the collation derivation for DTD for "this" node and otherDTS do not * match, and none of them have the derivation of NONE then it means that * we are dealing with collation derivation of IMPLICIT and EXPLICIT and * hence the dominant DTD should get collation derivation of NONE. This is * not a possibility in Derby 10.3 because the only 2 possible collation * derivation supported are IMPLICIT and NONE. * * Step 4 * If the collation derivation for DTD for "this" node and otherDTS match, * then check if the collation types match too. If not, then the dominant * DTD should get collation derivation of NONE. Now, note that ValueNodeList has a method called getDominantTypeServices where it could deal with any number of DTDs to determine the dominant DTD. It calls DataTypeDescriptor.getDominantType on 2 DTDs at a time. At the beginning, the 2 DTDs are the first two in its vector. It gets an intermediate dominantDTS back for those 2 DTDs. Next, it calls DataTypeDescriptor.getDominantType with the intermediate dominantDTS and the 3rd element in its vector and so on and so forth. It is not enough to just use 2 DTDs at a time to determine the collation info for the final dominantDTS. We need to consider all the DTDs in the vector together to determine the correct collation info for the final dominantDTS. The algorithm used by ValueNodeList.getDominantTypeServices is in that method's javadoc and included here for reference: * Algorithm for determining collation information * This method will check if it is dealing with character string datatypes. * If yes, then it will check if all the character string datatypes have * the same collation derivation and collation type associated with them. * If not, then the resultant DTD from this method will have collation * derivation of NONE. If yes, then the resultant DTD from this method will * have the same collation derivation and collation type as all the * character string datatypes. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@545706 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataTypeDescriptor.java", "hunks": [ { "added": [ "\t * ", "\t * If dealing with character string types, then make sure to set the", "\t * collation info on the dominant type. Following algorithm will be used ", "\t * for dominant DTD's collation determination. Each of the steps of the ", "\t * algorithem have been numbered in the comments below and those same ", "\t * numbers are used in the actual algorithm below so it is easier to ", "\t * understand and maintain.", "\t * ", "\t * Step 1", "\t * If the DTD for \"this\" node has the same collation derivation as the ", "\t * otherDTS, then check if their collation types match too. If the ", "\t * collation types match too, then DTD for dominant type will get the same ", "\t * collation derivation and type.", "\t * ", "\t * Step 2", "\t * If the collation derivation for DTD for \"this\" node and otherDTS do not ", "\t * match, then check if one of them has the collation derivation of NONE. ", "\t * If that is the case, then dominant DTD will get the collation type and ", "\t * derivation of DTD whose collation derivation is not NONE.", "\t * ", "\t * Step 3", "\t * If the collation derivation for DTD for \"this\" node and otherDTS do not ", "\t * match, and none of them have the derivation of NONE then it means that ", "\t * we are dealing with collation derivation of IMPLICIT and EXPLICIT and ", "\t * hence the dominant DTD should get collation derivation of NONE. This is ", "\t * not a possibility in Derby 10.3 because the only 2 possible collation ", "\t * derivation supported are IMPLICIT and NONE.", "\t * ", "\t * Step 4", "\t * If the collation derivation for DTD for \"this\" node and otherDTS match, ", "\t * then check if the collation types match too. If not, then the dominant ", "\t * DTD should get collation derivation of NONE. " ], "header": "@@ -484,6 +484,38 @@ public final class DataTypeDescriptor implements TypeDescriptor, Formatable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ValueNodeList.java", "hunks": [ { "added": [ "\t * Get the dominant DataTypeServices from the elements in the list. This", "\t * method will also set the correct collation information on the dominant", "\t * DataTypeService.", "\t * ", "\t * Algorithm for determining collation information", "\t * This method will check if it is dealing with character string datatypes." ], "header": "@@ -140,8 +140,12 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [ "\t * Get the dominant DataTypeServices from the elements in the list. This ", "\t * method will also check if it is dealing with character string datatypes." ] }, { "added": [ "\t * sysCharColumn1 || userCharColumn" ], "header": "@@ -151,7 +155,7 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [ "\t * sysCharColumn || userCharColumn" ] }, { "added": [ "\t * Note that this method calls DTD.getDominantType and that method returns", "\t * the dominant type of the 2 DTDs involved in this method. The method also", "\t * sets the collation info on the dominant type following the algorithm", "\t * mentioned in the comments of ", "\t * @see DataTypeDescriptor#getDominantType(DataTypeDescriptor, ClassFactory)", "\t * But when there are more than 2 DTDs involved in this ValueNodeList, we", "\t * can't determine the collation info using only 2 DTDs at a time which is", "\t * what TD.getDominantType does. 
Consider following eg", "\t * sysCharColumn1 || userCharColumn || sysCharColumn2", "\t * If we let the DataTypeDescriptor.getDominantType determine the collation", "\t * of the eg above, then DataTypeDescriptor.getDominantType will first set ", "\t * collation derivation of NONE for the following. This intermediate DTD is ", "\t * tracked by dominantDTS ", "\t * sysCharColumn1 || userCharColumn", "\t * Next, DataTypeDescriptor.getDominantType gets called for the intermediate", "\t * DTD (dominantDTS) and sysCharColumn2", "\t * dominantDTS || sysCharColumn2", "\t * For these two DTDs, DataTypeDescriptor.getDominantType will set ", "\t * collation type of UCS_BASIC and collation derivation of IMPLICIT. So, the", "\t * result string of the sysCharColumn1 || userCharColumn || sysCharColumn2", "\t * will have collation type of UCS_BASIC and collation derivation of ", "\t * IMPLICIT, but that is not correct. The reason for this is ", "\t * DataTypeDescriptor.getDominantType deals with only 2 DTDs at a time. To", "\t * fix this problem, we basically ignore the collation type and derivation ", "\t * picked by DataTypeDescriptor.getDominantType. Instead we let ", "\t * getDominantTypeServices use the simple algorithm listed at the top of", "\t * this method's comments to determine the collation type and derivation for ", "\t * this ValueNodeList object.", "\t * " ], "header": "@@ -166,6 +170,35 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [] }, { "added": [ "\t\t\t//if we didn't find any collation mismatch, then resultant dominant" ], "header": "@@ -233,7 +266,7 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [ "\t\t\t//if we didn't fine any collation mismatch, then resultant dominant" ] } ] } ]
derby-DERBY-2599-244952e1
DERBY-2599 Committing patch attached to DERBY2599_Set_collation_for_aggregates_v1_diff.txt which does the job of setting the correct collation type and derivation for aggregates. As per the SQL standard, if the operands of the aggregate methods are string operands and they do not all have the same collation derivation and type on them, then the resultant string operand will have collation derivation of NONE. One thing that is missing: as per the SQL spec, in a comparison operator, as long as there is one operand with non-NONE collation derivation, the comparison should work. I do not have that part working yet. If the two operands of the comparison operator do not have the same collation, the comparison will fail. Also, if both sides of the comparison operator have NONE collation, the current Derby 10.3 code will not catch that. Both of these issues can be addressed in a subsequent patch. The aggregate methods that are covered by the patch are COALESCE, CONCATENATE, NULLIF, and CASE. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@540201 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataTypeDescriptor.java", "hunks": [ { "added": [ "\t\t\t\t\t\t\t\t\t\t\t\tmaximumWidth,", "\t\t\t\t\t\t\t\t\t\t\t\tsource.getCollationType(),", "\t\t\t\t\t\t\t\t\t\t\t\tsource.getCollationDerivation());" ], "header": "@@ -369,7 +369,9 @@ public final class DataTypeDescriptor implements TypeDescriptor, Formatable", "removed": [ "\t\t\t\t\t\t\t\t\t\t\t\tmaximumWidth);" ] }, { "added": [ "\t\t\t\tsource.getPrecision(),", "\t\t\t\tsource.getScale(),", "\t\t\t\tisNullable,", "\t\t\t\tmaximumWidth,", "\t\t\t\tsource.getCollationType(),", "\t\t\t\tsource.getCollationDerivation());", "" ], "header": "@@ -385,8 +387,13 @@ public final class DataTypeDescriptor implements TypeDescriptor, Formatable", "removed": [ "\t\t\t\t\t\t\t\t\t\t\t\tisNullable,", "\t\t\t\t\t\t\t\t\t\t\t\tmaximumWidth);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/CastNode.java", "hunks": [ { "added": [], "header": "@@ -21,24 +21,15 @@", "removed": [ "import org.apache.derby.iapi.services.context.ContextManager;", "", "import org.apache.derby.iapi.services.monitor.Monitor;", "", "import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;", "", "import org.apache.derby.iapi.sql.compile.CompilerContext;", "import org.apache.derby.iapi.sql.dictionary.DataDictionary;", "" ] }, { "added": [], "header": "@@ -46,26 +37,18 @@ import org.apache.derby.iapi.reference.Limits;", "removed": [ "import org.apache.derby.iapi.types.DataValueFactory;", "import org.apache.derby.iapi.types.VariableSizeDataValue;", "import org.apache.derby.iapi.reference.SQLState;", "import org.apache.derby.iapi.types.DataValueDescriptor;", "", "import org.apache.derby.iapi.services.loader.ClassInspector;", "", "import org.apache.derby.iapi.sql.compile.C_NodeTypes;" ] }, { "added": [], "header": "@@ -75,14 +58,6 @@ import org.apache.derby.iapi.types.NumberDataType;", "removed": [ "import org.apache.derby.catalog.AliasInfo;", "import org.apache.derby.catalog.TypeDescriptor;", "", "import org.apache.derby.iapi.types.SQLReal;", "", "import java.sql.Date;", "import java.sql.Time;", "import java.sql.Timestamp;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ConcatenationOperatorNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.StringDataValue;" ], "header": "@@ -27,14 +27,10 @@ import org.apache.derby.iapi.services.sanity.SanityManager;", "removed": [ "import org.apache.derby.iapi.sql.dictionary.DataDictionary;", "import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;", "import org.apache.derby.iapi.types.ConcatableDataValue;", "import org.apache.derby.iapi.types.BitDataValue;", "" ] }, { "added": [ "\t\t\t", "\t\t\tleftOperand.setType(new DataTypeDescriptor(leftType, true));", "\t\t\tif (rightOperand.getTypeId().isStringTypeId())", "\t\t\t{//collation of ? operand should be same as the other operand", "\t\t\t\tleftOperand.getTypeServices().setCollationDerivation(", "\t\t\t\t\trightOperand.getTypeServices().getCollationDerivation());", "\t\t\t\tleftOperand.getTypeServices().setCollationType(", "\t\t\t\t\trightOperand.getTypeServices().getCollationType());", "\t\t\t}" ], "header": "@@ -136,8 +132,15 @@ public class ConcatenationOperatorNode extends BinaryOperatorNode", "removed": [ "", "\t\tleftOperand.setType(new DataTypeDescriptor(leftType, true));" ] }, { "added": [ "\t\t\trightOperand.setType(", "\t\t\tif (leftOperand.getTypeId().isStringTypeId())", "\t\t\t{//collation of ? 
operand should be same as the other operand", "\t\t\t\trightOperand.getTypeServices().setCollationDerivation(", "\t\t\t\t\t\tleftOperand.getTypeServices().getCollationDerivation());", "\t\t\t\trightOperand.getTypeServices().setCollationType(", "\t\t\t\t\t\tleftOperand.getTypeServices().getCollationType());", "\t\t\t}" ], "header": "@@ -179,11 +182,17 @@ public class ConcatenationOperatorNode extends BinaryOperatorNode", "removed": [ "\t\t", "\t\trightOperand.setType(" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ValueNodeList.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.StringDataValue;" ], "header": "@@ -25,12 +25,9 @@ import org.apache.derby.iapi.services.sanity.SanityManager;", "removed": [ "import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;", "", "import org.apache.derby.iapi.sql.dictionary.DataDictionary;", "" ] }, { "added": [ "\t * Get the dominant DataTypeServices from the elements in the list. This ", "\t * method will also check if it is dealing with character string datatypes.", "\t * If yes, then it will check if all the character string datatypes have", "\t * the same collation derivation and collation type associated with them.", "\t * If not, then the resultant DTD from this method will have collation", "\t * derivation of NONE. If yes, then the resultant DTD from this method will", "\t * have the same collation derivation and collation type as all the ", "\t * character string datatypes.", "\t * ", "\t * eg consider we are dealing with a database with territory based ", "\t * collation. Now consider following example first", "\t * sysCharColumn || userCharColumn", "\t * The result of this concatenation will have collation derivation of NONE", "\t * because the first operand has collation derivation of IMPLICIT and ", "\t * collation type of UCS_BASIC whereas the 2nd opernad has collation ", "\t * derivation of IMPLICIT and collation type of territory based. Since the", "\t * 2 operands don't have matching collaiton information, the result of this", "\t * concatenation will have collation derivation of NONE.", "\t * ", "\t * Now consider following example", "\t * sysCharColumn1 || sysCharColumn2", "\t * Since in this example, both the operands have the same collation", "\t * derivation of IMPLICIT and same collation type of UCS_BASIC, the ", "\t * resultant type will have collation derivation of IMPLICIT and collation ", "\t * type of UCS_BASIC", "\t * " ], "header": "@@ -143,8 +140,32 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [ "\t * Get the dominant DataTypeServices from the elements in the list.", "\t *" ] }, { "added": [ "\t\t//Following 2 will hold the collation derivation and type of the first ", "\t\t//string operand. This collation information will be checked against", "\t\t//the collation derivation and type of other string operands. If a ", "\t\t//mismatch is found, foundCollationMisMatch will be set to true.", "\t\tint firstCollationDerivation = -1;", "\t\tint firstCollationType = -1;", "\t\t//As soon as we find 2 strings with different collations, we set the ", "\t\t//following flag to true. At the end of the method, if this flag is set ", "\t\t//to true then it means that we have operands with different collation", "\t\t//types and hence the resultant dominant type will have to have the", "\t\t//collation derivation of NONE. 
", "\t\tboolean foundCollationMisMatch = false;" ], "header": "@@ -152,6 +173,18 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [] }, { "added": [ "\t\t\tif (valueNodeDTS.getTypeId().isStringTypeId())", "\t\t\t{", "\t\t\t\tif (firstCollationDerivation == -1)", "\t\t\t\t{", "\t\t\t\t\t//found first string type. Initialize firstCollationDerivation", "\t\t\t\t\t//and firstCollationType with collation information from ", "\t\t\t\t\t//that first string type operand.", "\t\t\t\t\tfirstCollationDerivation = valueNodeDTS.getCollationDerivation(); ", "\t\t\t\t\tfirstCollationType = valueNodeDTS.getCollationType(); ", "\t\t\t\t} else if (!foundCollationMisMatch)", "\t\t\t\t{", "\t\t\t\t\tif (firstCollationDerivation != valueNodeDTS.getCollationDerivation())", "\t\t\t\t\t\tfoundCollationMisMatch = true;//collation derivations don't match", "\t\t\t\t\telse if (firstCollationType != valueNodeDTS.getCollationType())", "\t\t\t\t\t\tfoundCollationMisMatch = true;//collation types don't match", "\t\t\t\t}", "\t\t\t}" ], "header": "@@ -162,6 +195,23 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [] } ] } ]
derby-DERBY-2599-68f9f47a
DERBY-2599 Committing patch DERBY2599_getNull_should_set_collation_info_v1_diff.txt attached to DERBY-2599. This address the correct collation setting for ConstantNode created through QueryTreeNode's getNullNode method. This method currently creates a ConstantNode using the passed typeId. We need to set the correct collation type and derivation on this ConstantNode. This is accomplished by having the caller of this method pass the correct collation type and derivation. The junit tests have run fine with no problems. derbyall is almost finished with no new failures. In addition to the above change, this patch also fixes some comments in DataTypeDescriptor.java and TypeDescriptorImpl.java git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@540667 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/AggregateNode.java", "hunks": [ { "added": [ "\t\t\t\tcompTypeId,", "\t\t\t\tgetContextManager(),", "\t\t\t\tgetTypeServices().getCollationType(),", "\t\t\t\tgetTypeServices().getCollationDerivation()", "\t\t\t\t); // no params" ], "header": "@@ -553,8 +553,11 @@ public class AggregateNode extends UnaryOperatorNode", "removed": [ "\t\t\t\t\t\t\tcompTypeId,", "\t\t\t\t\t\t\tgetContextManager());\t\t// no params" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/QueryTreeNode.java", "hunks": [ { "added": [ "\t * Get a ConstantNode to represent a typed null value. Then set it's ", "\t * collation type and derivation", "\t * @param collationType The collation type of the ConstantNode", "\t * @param collationDerivation The Collation Derivation of the ConstantNode", "\t\t\tContextManager cm, int collationType, int collationDerivation)" ], "header": "@@ -743,17 +743,20 @@ public abstract class QueryTreeNode implements Visitable", "removed": [ "\t * Get a ConstantNode to represent a typed null value", "\t\t\tContextManager cm)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumn.java", "hunks": [ { "added": [ "\t\t\t\t\t\t\t\t\tgetContextManager(), getTypeServices().getCollationType(),", "\t\t\t\t\t\t\t\t\tgetTypeServices().getCollationDerivation());" ], "header": "@@ -346,7 +346,8 @@ public class ResultColumn extends ValueNode", "removed": [ "\t\t\t\t\t\t\t\t\tgetContextManager());" ] } ] } ]
derby-DERBY-2599-f63b7da8
DERBY-2599 There are few character string types that should take their collation type from compilation schema. I had earlier checked in code for them to use current schema rather than compilation schema (For reference http://www.nabble.com/more-on-system-schema-vs.-user-schema-and-character-constants.-p10885286.html) With this commit, I am adding an utility method in ValueNode called setCollationUsingCompilationSchema(int) which will use the compilation schema's collation type for it's DTD. And it will use the passed int value to set its DTD's collation derivation. This utility method will be used by the subclasses of ValueNode to set their DTD's collation type to compilation schema's type wherever required. Note that NOT all the character string types take their collation from the compilation schema. For instance, persistent character string type column from a table will take the collation type from the schema their table belongs to rather than the compilation schema. I have run the 2 collation related tests, CollationTest and CollationTest2 and they run with no problems. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@543266 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/BinaryOperatorNode.java", "hunks": [ { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\tleftOperand.setCollationUsingCompilationSchema(" ], "header": "@@ -316,11 +316,9 @@ public class BinaryOperatorNode extends ValueNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\tleftOperand.getTypeServices().setCollationDerivation(", "\t\t\tleftOperand.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema().getCollationType());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ConcatenationOperatorNode.java", "hunks": [ { "added": [ "\t\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\t\tleftOperand.setCollationUsingCompilationSchema(" ], "header": "@@ -125,12 +125,9 @@ public class ConcatenationOperatorNode extends BinaryOperatorNode {", "removed": [ "\t\t\t\t//collation of ? operand should be same as the current schema", "\t\t\t\tleftOperand.getTypeServices().setCollationDerivation(", "\t\t\t\tleftOperand.getTypeServices().setCollationType(", "\t\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t\t.getCollationType());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/LikeEscapeOperatorNode.java", "hunks": [ { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\treceiver.setCollationUsingCompilationSchema(" ], "header": "@@ -189,12 +189,9 @@ public final class LikeEscapeOperatorNode extends TernaryOperatorNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\treceiver.getTypeServices().setCollationDerivation(", "\t\t\treceiver.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] }, { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\tleftOperand.setCollationUsingCompilationSchema(" ], "header": "@@ -220,12 +217,9 @@ public final class LikeEscapeOperatorNode extends TernaryOperatorNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\tleftOperand.getTypeServices().setCollationDerivation(", "\t\t\tleftOperand.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/TernaryOperatorNode.java", "hunks": [ { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\treceiver.setCollationUsingCompilationSchema(" ], "header": "@@ -524,12 +524,9 @@ public class TernaryOperatorNode extends ValueNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\treceiver.getTypeServices().setCollationDerivation(", "\t\t\treceiver.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] }, { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\tleftOperand.setCollationUsingCompilationSchema(" ], "header": "@@ -537,12 +534,9 @@ public class TernaryOperatorNode extends ValueNode", "removed": [ "\t\t\t//collation of ? 
operand should be same as the current schema", "\t\t\tleftOperand.getTypeServices().setCollationDerivation(", "\t\t\tleftOperand.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] }, { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\treceiver.setCollationUsingCompilationSchema(" ], "header": "@@ -635,12 +629,9 @@ public class TernaryOperatorNode extends ValueNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\treceiver.getTypeServices().setCollationDerivation(", "\t\t\treceiver.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] }, { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\tleftOperand.setCollationUsingCompilationSchema(" ], "header": "@@ -661,12 +652,9 @@ public class TernaryOperatorNode extends ValueNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\tleftOperand.getTypeServices().setCollationDerivation(", "\t\t\tleftOperand.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] }, { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\trightOperand.setCollationUsingCompilationSchema(" ], "header": "@@ -676,12 +664,9 @@ public class TernaryOperatorNode extends ValueNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\trightOperand.getTypeServices().setCollationDerivation(", "\t\t\trightOperand.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] }, { "added": [ "\t\t\t//collation of ? operand should be same as the compilation schema", "\t\t\treceiver.setCollationUsingCompilationSchema(" ], "header": "@@ -757,12 +742,9 @@ public class TernaryOperatorNode extends ValueNode", "removed": [ "\t\t\t//collation of ? operand should be same as the current schema", "\t\t\treceiver.getTypeServices().setCollationDerivation(", "\t\t\treceiver.getTypeServices().setCollationType(", "\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema()", "\t\t\t\t\t\t\t.getCollationType());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ValueNodeList.java", "hunks": [ { "added": [ "\t\t\t\tvalueNode.setCollationUsingCompilationSchema(" ], "header": "@@ -551,10 +551,8 @@ public class ValueNodeList extends QueryTreeNodeVector", "removed": [ "\t\t\t\tvalueNode.getTypeServices().setCollationDerivation(", "\t\t\t\tvalueNode.getTypeServices().setCollationType(", "\t\t\t\t\t\tgetLanguageConnectionContext().getDefaultSchema().getCollationType());" ] } ] } ]
derby-DERBY-2599-fc245d8f
DERBY-2599 Committing the patch DERBY2599_correct_collation_for_cast_v1_diff.txt attached to DERBY-2599 which will ensure that when an operand is CASTed to string datatype, the result type will take the collation of the current schema. So, if current schema is user schema for a database with territory based collation, then a comparison between a persistent character column from system table and a constant character string will fail because persistent character column from system table will have the collation of UCS_BASIC but the constant character string will pick up it's collation from current schema which is user schema and hence the collation will be territory based. Since the 2 collations don't match, we will get a compilation error for the query. To get around this, a user can rewrite the query to CAST persistent character column from system table to one of the character types and that resultant character type will pickup it's collation from current schema, so now both the operands will have collation of territory based and the query will execute without collation mismatch failure. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@539060 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2601-b54db0f3
DERBY-2601: Server SQLException error codes are not returned to client Encode the error code in the SQLCODE value sent from the server. This value used to be -1 for all errors, but the client was OK with any negative value. Now it's a negative value equal to -(errorCode+1). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1346833 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/SQLExceptionFactory40.java", "hunks": [ { "added": [ " } else if (sqlState.startsWith(SQLState.CONNECTIVITY_PREFIX)) {" ], "header": "@@ -64,8 +64,7 @@ public class SQLExceptionFactory40 extends SQLExceptionFactory {", "removed": [ " } else if (sqlState.startsWith(SQLState.CONNECTIVITY_PREFIX) ||", " errCode >= ExceptionSeverity.SESSION_SEVERITY) {" ] }, { "added": [ " } else if (sqlState.startsWith(SQLState.TRANSACTION_PREFIX)) {" ], "header": "@@ -77,8 +76,7 @@ public class SQLExceptionFactory40 extends SQLExceptionFactory {", "removed": [ " } else if (sqlState.startsWith(SQLState.TRANSACTION_PREFIX) ||", " errCode >= ExceptionSeverity.TRANSACTION_SEVERITY ) {" ] } ] }, { "file": "java/client/org/apache/derby/client/am/Sqlca.java", "hunks": [ { "added": [ "import org.apache.derby.shared.common.error.ExceptionSeverity;", "import org.apache.derby.shared.common.reference.SQLState;" ], "header": "@@ -22,8 +22,9 @@", "removed": [ "import org.apache.derby.shared.common.reference.SQLState;" ] }, { "added": [ " /**", " * <p>", " * Get the error code based on the SQL code received from the server.", " * </p>", " *", " * <p>", " * The conversion from SQL code to error code happens like this:", " * </p>", " *", " * <ul>", " * <li>If the SQL code is 0, there is no error code because the Sqlca", " * doesn't represent an error. Return 0.</li>", " * <li>If the SQL code is positive, the Sqlca represents a warning, and", " * the SQL code represents the actual error code. Return the SQL code.</li>", " * <li>If the SQL code is negative, the Sqlca represents an error, and", " * the error code is {@code -(sqlCode+1)}.</li>", " * </ul>", " *", " * @see org.apache.derby.impl.drda.DRDAConnThread#getSqlCode(java.sql.SQLException)", " */", " public synchronized int getErrorCode() {", " // Warning or other non-error, return SQL code.", " if (sqlCode_ >= 0) return sqlCode_;", "", " // Negative SQL code means it is an error. Transform into a positive", " // error code.", " int errorCode = -(sqlCode_ + 1);", "", " // In auto-commit mode, the embedded driver promotes statement", " // severity to transaction severity. 
Do the same here to match.", " if (errorCode == ExceptionSeverity.STATEMENT_SEVERITY &&", " connection_ != null && connection_.autoCommit_) {", " errorCode = ExceptionSeverity.TRANSACTION_SEVERITY;", " }", "", " return errorCode;", " }", "" ], "header": "@@ -99,6 +100,44 @@ public abstract class Sqlca {", "removed": [] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ " writeSQLCARD(sqle, 0, 0);" ], "header": "@@ -720,7 +720,7 @@ class DRDAConnThread extends Thread {", "removed": [ " writeSQLCARD(sqle, CodePoint.SVRCOD_ERROR, 0, 0);" ] }, { "added": [ "\t\t\t\t\t\twriteSQLCARD(w, 0, 0);" ], "header": "@@ -812,7 +812,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t\t\t\t\twriteSQLCARD(w, CodePoint.SVRCOD_WARNING, 0, 0);" ] }, { "added": [ " \t\t\twriteSQLCARD(databaseAccessException, 0, 0);" ], "header": "@@ -1298,7 +1298,7 @@ class DRDAConnThread extends Thread {", "removed": [ " \t\t\twriteSQLCARD(databaseAccessException,CodePoint.SVRCOD_ERROR,0,0);" ] }, { "added": [ "\t\t\twriteSQLCARD(e, updateCount, 0);" ], "header": "@@ -5867,10 +5867,9 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\tint severity = CodePoint.SVRCOD_INFO;", "\t\t\twriteSQLCARD(e,severity, updateCount, 0);" ] }, { "added": [ "\t\tint severity = getExceptionSeverity(e);" ], "header": "@@ -5878,7 +5877,7 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\tseverity = getExceptionSeverity(e);" ] }, { "added": [ "\t\twriteSQLCARD(e, updateCount, 0);", " /**", " * <p>", " * Get the SQLCODE to send for an exception or a warning.", " * </p>", " *", " * <p>", " * The client expects a negative SQLCODE for exceptions and a positive", " * SQLCODE for warnings. SQLCODE 0 means there is no error or warning", " * condition. SQLCODE is also used to encode the severity of the condition", " * (as returned by {@code SQLException.getErrorCode()}).", " * </p>", " *", " * <p>", " * For warnings, the SQLCODE is 10000, which is identical to", " * {@link ExceptionSeverity#WARNING_SEVERITY}.", " * </p>", " *", " * <p>", " * For exceptions, the SQLCODE is set to {@code -severity-1}, which allows", " * all non-negative severity values to be encoded. (Derby only uses", " * non-negative severity values in the first place.)", " * </p>", " *", " * @param e the exception or warning to get the SQLCODE for", " * @return the value to send as SQLCODE", " */", " private int getSqlCode(SQLException e)", " if (e == null) return 0;", "", " // All SQLWarnings should have warning severity. However,", " // DataTruncation conditions for write operations (with SQL state", " // 22001) are thrown as exceptions, even though DataTruncation", " // technically is a sub-class of SQLWarning.", " if (e instanceof SQLWarning &&", " !SQLState.LANG_STRING_TRUNCATION.equals(e.getSQLState())) {", " return ExceptionSeverity.WARNING_SEVERITY;", " }", "", " // The exception represents an error condition, so encode the severity", " // as a negative value in the SQLCODE. 
Negative severity values are", " // changed to 0 (NO_APPLICABLE_SEVERITY).", " int severity =", " Math.max(ExceptionSeverity.NO_APPLICABLE_SEVERITY,", " e.getErrorCode());", " return -severity - 1;", "\tprivate void writeSQLCARD(SQLException e," ], "header": "@@ -5892,20 +5891,58 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\twriteSQLCARD(e,severity, updateCount, 0);", "\tprivate int getSqlCode(int severity)", "\t\tif (severity == CodePoint.SVRCOD_WARNING)\t\t// warning", "\t\t\treturn 100;\t\t//CLI likes it", "\t\telse if (severity == CodePoint.SVRCOD_INFO) ", "\t\t\treturn 0;", "\t\telse", "\t\t\treturn -1;", "\tprivate void writeSQLCARD(SQLException e,int severity, " ] }, { "added": [ " int sqlcode = getSqlCode(e);" ], "header": "@@ -6046,7 +6083,7 @@ class DRDAConnThread extends Thread {", "removed": [ " int sqlcode = 0;" ] } ] } ]
derby-DERBY-2602-bead0abc
DERBY-2602: Allow full JDBC nanosecond-precision in timestamps across the network just as is done in the embedded scenario. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@933726 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/DateTime.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.DRDAConstants;" ], "header": "@@ -22,6 +22,7 @@ package org.apache.derby.client.am;", "removed": [] }, { "added": [ " private static final int timestampRepresentationLength = DRDAConstants.DRDA_TIMESTAMP_LENGTH;" ], "header": "@@ -45,7 +46,7 @@ public class DateTime {", "removed": [ " private static final int timestampRepresentationLength = 26;" ] }, { "added": [ " * See getTimestampLength() for an explanation of how timestamps are formatted.", " * @param supportsTimestampNanoseconds true if the server supports nanoseconds in timestamps", " String encoding,", " boolean supportsTimestampNanoseconds) ", " String timestamp = new String", " ( buffer, offset, getTimestampLength( supportsTimestampNanoseconds ), encoding );", " * the parsed nanoseconds value and use that to set nanos.", " int nanos = parseTimestampString(timestamp, cal, supportsTimestampNanoseconds);", " ts.setNanos( nanos );", " * Parse a String of the form <code>yyyy-mm-dd-hh.mm.ss.ffffff[fff]</code>", " * @param cal Calendar into which to store the parsed fields. Should not be null.", " * @param supportsTimestampNanoseconds true if the server supports nanoseconds in timestamps", " * @return The nanoseconds field as parsed from the timestamp string.", " * create a java.sql.Timestamp with nanosecond precision).", " Calendar cal, boolean supportsTimestampNanoseconds )" ], "header": "@@ -146,52 +147,53 @@ public class DateTime {", "removed": [ " * Expected character representation is DERBY string representation of a timestamp:", " * <code>yyyy-mm-dd-hh.mm.ss.ffffff</code>.", " String encoding) ", " String timestamp = new String(buffer, offset, ", " DateTime.timestampRepresentationLength,encoding);", " * the parsed microseconds value and use that to set nanos.", " int micros = parseTimestampString(timestamp, cal);", " ts.setNanos(micros * 1000);", " * Parse a String of the form <code>yyyy-mm-dd-hh.mm.ss.ffffff</code>", " * @param cal Calendar into which to store the parsed fields. Should", " * not be null.", " * @return The microseconds field as parsed from the timestamp string.", " * create a java.sql.Timestamp with microsecond precision).", " Calendar cal)" ] }, { "added": [ " int nanos = ", " 100000000 * (((int) timestamp.charAt(20)) - zeroBase) +", " 10000000 * (((int) timestamp.charAt(21)) - zeroBase) +", " 1000000 * (((int) timestamp.charAt(22)) - zeroBase) +", " 100000 * (((int) timestamp.charAt(23)) - zeroBase) +", " 10000 * (((int) timestamp.charAt(24)) - zeroBase) +", " 1000 * (((int) timestamp.charAt(25)) - zeroBase);", " ", " if ( supportsTimestampNanoseconds )", " {", " nanos += 100 * (((int) timestamp.charAt(26)) - zeroBase);", " nanos += 10 * (((int) timestamp.charAt(27)) - zeroBase);", " nanos += (((int) timestamp.charAt(28)) - zeroBase);", " }", " ", " /* The \"ffffff[fff]\" that we parsed is nanoseconds. 
In order to", " * we have to divide by 1000000.", " cal.set(Calendar.MILLISECOND, nanos / 1000000);", " ", " return nanos;" ], "header": "@@ -221,20 +223,28 @@ public class DateTime {", "removed": [ " int micros = ", " 100000 * (((int) timestamp.charAt(20)) - zeroBase) +", " 10000 * (((int) timestamp.charAt(21)) - zeroBase) +", " 1000 * (((int) timestamp.charAt(22)) - zeroBase) +", " 100 * (((int) timestamp.charAt(23)) - zeroBase) +", " 10 * (((int) timestamp.charAt(24)) - zeroBase) +", " (((int) timestamp.charAt(25)) - zeroBase);", "", " /* The \"ffffff\" that we parsed is microseconds. In order to", " * we have to divide by 1000.", " cal.set(Calendar.MILLISECOND, micros / 1000);", " return micros;" ] }, { "added": [ " * See getTimestampLength() for an explanation of how timestamps are formatted.", " *", " * @param supportsTimestampNanoseconds true if the server supports nanoseconds in timestamps", " * @return DateTime.timestampRepresentationLength. This is the fixed length in bytes, taken to represent the timestamp value", " java.sql.Timestamp timestamp,", " boolean supportsTimestampNanoseconds) " ], "header": "@@ -328,20 +338,20 @@ public class DateTime {", "removed": [ " * java.sql.Timestamp is converted to a character representation which is in DERBY string ", " * representation of a timestamp: <code>yyyy-mm-dd-hh.mm.ss.ffffff</code>.", " * and then converted to bytes using UTF8 encoding", " * @return DateTime.timestampRepresentationLength. This is the fixed ", " * length in bytes, taken to represent the timestamp value", " java.sql.Timestamp timestamp) " ] }, { "added": [ " int arrayLength = getTimestampLength( supportsTimestampNanoseconds );", " char[] timestampChars = new char[ arrayLength ];", "" ], "header": "@@ -356,8 +366,10 @@ public class DateTime {", "removed": [ " char[] timestampChars = new char[DateTime.timestampRepresentationLength];" ] }, { "added": [ " if ( supportsTimestampNanoseconds )", " {", " int nanosecondsOnly = timestamp.getNanos() % 1000;", " ", " timestampChars[ 26 ] = (char) (nanosecondsOnly / 100 + zeroBase);", " timestampChars[ 27 ] = (char) ((nanosecondsOnly % 100) / 10 + zeroBase);", " timestampChars[ 28 ] = (char) (nanosecondsOnly % 10 + zeroBase);", " }", "", " String newtimestampString = new String(timestampChars);", " byte[] timestampBytes = newtimestampString.getBytes(Typdef.UTF8ENCODING);", " System.arraycopy(timestampBytes, 0, buffer, offset, arrayLength);", " return arrayLength;" ], "header": "@@ -385,13 +397,23 @@ public class DateTime {", "removed": [ " byte[] timestampBytes = (new String(timestampChars)).getBytes(Typdef.UTF8ENCODING);", " System.arraycopy(timestampBytes, 0, buffer, offset, DateTime.timestampRepresentationLength);", " return DateTime.timestampRepresentationLength;" ] }, { "added": [ " * See getTimestampLength() for an explanation of how timestamps are formatted." ], "header": "@@ -504,8 +526,7 @@ public class DateTime {", "removed": [ " * Expected character representation is DERBY string representation of a timestamp:", " * <code>yyyy-mm-dd-hh.mm.ss.ffffff</code>." ] }, { "added": [ " * See getTimestampLength() for an explanation of how timestamps are formatted." ], "header": "@@ -547,8 +568,7 @@ public class DateTime {", "removed": [ " * Expected character representation is DERBY string representation of a timestamp:", " * <code>yyyy-mm-dd-hh.mm.ss.ffffff</code>." 
] }, { "added": [ " parseTimestampString(timestamp, cal, false);" ], "header": "@@ -587,7 +607,7 @@ public class DateTime {", "removed": [ " parseTimestampString(timestamp, cal);" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetDatabaseMetaData.java", "hunks": [ { "added": [ " /** True if the server supports nanoseconds in timestamps */", " private boolean supportsTimestampNanoseconds_;", " " ], "header": "@@ -39,6 +39,9 @@ public class NetDatabaseMetaData extends org.apache.derby.client.am.DatabaseMeta", "removed": [] }, { "added": [ "", " supportsTimestampNanoseconds_ =", " productLevel_.greaterThanOrEqualTo(10, 6, 0);" ], "header": "@@ -100,6 +103,9 @@ public class NetDatabaseMetaData extends org.apache.derby.client.am.DatabaseMeta", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/NetStatementRequest.java", "hunks": [ { "added": [ "import org.apache.derby.client.am.DateTime;" ], "header": "@@ -25,6 +25,7 @@ import org.apache.derby.client.am.Lob;", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/Request.java", "hunks": [ { "added": [ "import org.apache.derby.client.am.DateTime;" ], "header": "@@ -20,6 +20,7 @@", "removed": [] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ " int timestampLength = appRequester.getTimestampLength();", " ", "\t\t\t\tString paramVal = reader.readStringData( timestampLength ).trim(); //parameter may be char value" ], "header": "@@ -4648,7 +4648,9 @@ class DRDAConnThread extends Thread {", "removed": [ "\t\t\t\tString paramVal = reader.readStringData(26).trim(); //parameter may be char value" ] } ] }, { "file": "java/engine/org/apache/derby/iapi/types/TypeId.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.DRDAConstants;" ], "header": "@@ -28,6 +28,7 @@ import org.apache.derby.catalog.types.BaseTypeIdImpl;", "removed": [] } ] } ]
derby-DERBY-2605-d25aee08
DERBY-2605: Patch to add compile-time checking of columns created from a "CREATE TABLE AS ... WITH NO DATA" statement, to ensure that the resultant table only includes columns with types that a user can create him/herself. In particular this blocks indirect creation of BOOLEAN columns, Object columns, and DECIMAL columns with precision greater than 31. Contributed by: James F. Adams (derby@xemaps.com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@539164 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateTableNode.java", "hunks": [ { "added": [ "\t\t\t\t{", "\t\t\t\t\tcontinue;" ], "header": "@@ -297,8 +297,8 @@ public class CreateTableNode extends DDLStatementNode", "removed": [ "\t\t\t {", "\t\t\t\t continue;" ] } ] } ]
derby-DERBY-2607-920fea50
DERBY-2607 DatabaseMetaData is not consistent about throwing SqlException when tablename=null git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@548251 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/DatabaseMetaData.java", "hunks": [ { "added": [ " // validate the table name ", " if (table == null) {", " throw new SqlException(agent_.logWriter_,", " new ClientMessageId(SQLState.TABLE_NAME_CANNOT_BE_NULL)); ", " }" ], "header": "@@ -1769,6 +1769,11 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [] }, { "added": [ " // validate the table name", " if (table == null) {", " throw new SqlException(agent_.logWriter_,", " new ClientMessageId(SQLState.TABLE_NAME_CANNOT_BE_NULL)); ", " } " ], "header": "@@ -1821,6 +1826,11 @@ public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {", "removed": [] } ] } ]
derby-DERBY-2610-9078aae5
DERBY-2610: Queries in metadata.properties allow tablepattern for JDBC methods that do not allow patterns. Patterns are now disallowed, except for in one of the queries (see Jira issue for details). Patch file: DERBY-2610-1.diff Patch contributed by Jørgen Løland. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@538072 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2610-fde4b69a
DERBY-2758: Update ODBCMetadataGenerator to create an ODBC version of the getCrossReference metadata query. This is required because, as of DERBY-2610, the JDBC version of that query no longer allows pattern matching on table names. Since the ODBC SQLForeignKeys function, which is mapped onto the getCrossReference metadata query, depends on pattern matching for correct behavior, we now create an ODBC-version of getCrossReference that allows pattern matching as in pre-10.3 releases. The SQLForeignKeys function is then mapped to the new, ODBC-specific version of the query. Contributed by: Jorgen Loland (jorgen.loland@sun.com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@545321 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyBuild/ODBCMetadataGenerator.java", "hunks": [ { "added": [ "\t// Types of changes that are possible. There are four", "\t//\t2. Where clause:", "\t//\t\tChange the where clause of the query. For ex. ", "\t//\t\tused to change getCrossReference \"T.TABLENAME=?\"", "\t//\t\tto \"T.TABLENAME LIKE ?\" since JDBC and ODBC specs", "\t//\t\tdiffer on whether table name must be set or not", "\t//\t3. Type and/or value change:", "\t//\t4. Additional column(s):", "", "\tprivate final byte WHERE_CLAUSE_CHANGE = 0x08;" ], "header": "@@ -56,23 +56,30 @@ import org.apache.derby.iapi.services.sanity.SanityManager;", "removed": [ "\t// Types of changes that are possible. There are three", "\t//\t2. Type and/or value change:", "\t//\t3. Additional column(s):" ] }, { "added": [ "\t\tchangeMap.put(\"getCrossReference\",", "\t\t\tnew Byte(WHERE_CLAUSE_CHANGE));", "" ], "header": "@@ -211,6 +218,9 @@ public class ODBCMetadataGenerator {", "removed": [] }, { "added": [ "\t\t// -- #2: Change WHERE clause.", "\t\tif (changeWhereClause(queryName, queryText)) haveODBCChanges = true;", "" ], "header": "@@ -351,6 +361,9 @@ public class ODBCMetadataGenerator {", "removed": [] }, { "added": [ "\t\t// -- #3.A: Prep to add new ODBC columns. Note: we need" ], "header": "@@ -361,7 +374,7 @@ public class ODBCMetadataGenerator {", "removed": [ "\t\t// -- #2.A: Prep to add new ODBC columns. Note: we need" ] }, { "added": [ "\t\t// -- #4: Alter column values, where needed.", "\t\t// -- #3.B: Add new ODBC columns." ], "header": "@@ -370,10 +383,10 @@ public class ODBCMetadataGenerator {", "removed": [ "\t\t// -- #3: Alter column values, where needed.", "\t\t// -- #2.B: Add new ODBC columns." ] }, { "added": [ "", "\t\t\tif (orderBy.length() != 0) {", "\t\t\t\t// re-attach ORDER BY clause.", "\t\t\t\todbcMetaFile.write(orderBy);", "\t\t\t}", "" ], "header": "@@ -392,6 +405,12 @@ public class ODBCMetadataGenerator {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedDatabaseMetaData.java", "hunks": [ { "added": [ "", "\t\tif (primaryTable == null || foreignTable == null) {", "\t\t\tthrow Util.generateCsSQLException(", "\t\t\t\t\t\t\tSQLState.TABLE_NAME_CANNOT_BE_NULL);", "\t\t}", "", "\t\ts.setString(3, primaryTable); //JDBC spec: must match table name as stored", "\t\ts.setString(6, foreignTable); //JDBC spec: must match table name as stored", " /**", " * In contrast to the JDBC version of getCrossReference, this", " * method allows null values for table names.", " */", " public ResultSet getCrossReferenceForODBC(", " String primaryCatalog, String primarySchema, String primaryTable,", " String foreignCatalog, String foreignSchema, String foreignTable)", " throws SQLException {", "", " PreparedStatement s = getPreparedQuery(\"odbc_getCrossReference\");", " s.setString(1, swapNull(primaryCatalog));", " s.setString(2, swapNull(primarySchema));", " s.setString(3, swapNull(primaryTable));", " s.setString(4, swapNull(foreignCatalog));", " s.setString(5, swapNull(foreignSchema));", " s.setString(6, swapNull(foreignTable));", " return s.executeQuery();", " }", "" ], "header": "@@ -2602,16 +2602,41 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t\ts.setString(3, swapNull(primaryTable));", "\t\ts.setString(6, swapNull(foreignTable));" ] } ] } ]
derby-DERBY-2611-0ba46a7f
DERBY-2611 Fixed upgrade test when run against 10.3 to not try and use in place compress in version 10.0. This feature was added in 10.1. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@535429 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2612-9ba17d84
DERBY-2612 - making running of DatabaseMetaDataTest in upgrade test more selective - not all fixtures can be run with all older versions. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@537728 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2613-c243be14
DERBY-2613 Upgrade of 10.0 to 10.3 was failing because system procedures were being created, and as part of the creation some updates were attempted to catalogs that were not created yet. Fixed in full upgrade by first creating the new system catalogs and then creating the procedures. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@535466 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/catalog/DD_Version.java", "hunks": [ { "added": [ " if (fromMajorVersionNumber <= DataDictionary.DD_VERSION_DERBY_10_1)", " {", " // add catalogs 1st, subsequent procedure adding may depend on", " // catalogs.", "", "\t\t\t// Add new system catalogs created for grant and revoke", "\t\t\tbootingDictionary.upgradeMakeCatalog(", " tc, DataDictionary.SYSTABLEPERMS_CATALOG_NUM);", "\t\t\tbootingDictionary.upgradeMakeCatalog(", " tc, DataDictionary.SYSCOLPERMS_CATALOG_NUM);", "\t\t\tbootingDictionary.upgradeMakeCatalog(", " tc, DataDictionary.SYSROUTINEPERMS_CATALOG_NUM);", " }", "" ], "header": "@@ -361,6 +361,20 @@ public\tclass DD_Version implements\tFormatable", "removed": [] }, { "added": [ " {", "\t\t\t\tSanityManager.ASSERT((aid != null), ", " \"Failed to get new Database Owner authorization\");", " }" ], "header": "@@ -380,12 +394,10 @@ public\tclass DD_Version implements\tFormatable", "removed": [ "\t\t\t\tSanityManager.ASSERT((aid != null), \"Failed to get new Database Owner authorization\");", "", "\t\t\t// Add new system catalogs created for grant and revoke", "\t\t\tbootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSTABLEPERMS_CATALOG_NUM);", "\t\t\tbootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSCOLPERMS_CATALOG_NUM);", "\t\t\tbootingDictionary.upgradeMakeCatalog(tc, DataDictionary.SYSROUTINEPERMS_CATALOG_NUM);" ] } ] } ]
derby-DERBY-2616-765b3e42
DERBY-2616 Increase the maximum wait time for tests that start the server. On my windows XP system I would get intermittent failures at the old 60 second max. I believe the issue is turnaround time on the port, rather than a specific startup issue in the code. Setting the max to 300 seconds allowed me to pass a complete nightly test and also passed a reiterated test 30 times that would previously fail every time at the 60 second level. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@535572 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/NetworkServerTestSetup.java", "hunks": [ { "added": [ " /** Setting maximum wait time to 300 seconds. For some systems it looks", " * like restarting a server to listen on the same port is blocked waiting", " * for a system specific interval. This number looks to be something", " * like 240 seconds on XP. Waiting shorter than this time causes", " * intermittent failures on a laptop running XP with a software firewall", " * and a VPN. Increasing the wait time should not adversely affect those", " * systems with fast port turnaround as the actual code loops for ", " * SLEEP_TIME intervals, so should never see WAIT_TIME.", " */", " private static final long WAIT_TIME = 300000;", " /** Sleep for 500 ms before pinging the network server (again) */", " private static final int SLEEP_TIME = 500;" ], "header": "@@ -43,11 +43,19 @@ import org.apache.derby.drda.NetworkServerControl;", "removed": [ " /** Wait maximum 1 minute for server to start */", " private static final long WAIT_TIME = 60000;", " /** Sleep for 50 ms before pinging the network server (again) */", " private static final int SLEEP_TIME = 50;" ] } ] } ]
derby-DERBY-2618-685a1474
DERBY-2618 (partial) Make ClobAsciiStream honour the contract for OutputStream.write(int) by ignoring top 24 bits. Optimize ClobAsciiStream by re-using an array rather than recreating one for each write(byte[]) call. Fix ClobStreamControl to correctly process a Derby UTF8 encoded stream. Does not fix the issue, fixes are being worked on by others under related bugs. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@538311 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/ClobAsciiStream.java", "hunks": [ { "added": [ "/**", " * Wrap a Writer as an OutputStream to support Clob.setAsciiStream().", " * Any value written to the OutputStream is a valid ASCII value", " * (0-255 from JDBC 4 spec appendix C2) thus this class simply", " * passes the written values onto the Writer.", " *", " */", " private final Writer writer;", " private final char[] buffer = new char[1024];" ], "header": "@@ -26,9 +26,17 @@ import java.io.IOException;", "removed": [ " private Writer writer;" ] }, { "added": [], "header": "@@ -41,9 +49,6 @@ final class ClobAsciiStream extends OutputStream {", "removed": [ " * <p>", " * Subclasses of <code>OutputStream</code> must provide an ", " * implementation for this method. " ] }, { "added": [ " writer.write(b & 0xff);" ], "header": "@@ -51,7 +56,7 @@ final class ClobAsciiStream extends OutputStream {", "removed": [ " writer.write (b);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/ClobStreamControl.java", "hunks": [ { "added": [ " * See comments in SQLChar.readExternal for more notes on", " * processing the UTF8 format." ], "header": "@@ -53,6 +53,8 @@ final class ClobStreamControl extends LOBStreamControl {", "removed": [] }, { "added": [ " if ((c & 0x80) == 0x00) {", " else if ((c & 0x60) == 0x40) // we know the top bit is set here", " {", " //found char of two byte width", " if (in.skip (1) != 1) {", " streamLength += 2; ", " else if ((c & 0x70) == 0x60) // we know the top bit is set here", " {", " if (in.skip (2) != 2) {", " streamLength += 3;", " }", " else", " {", " throw new UTFDataFormatException();" ], "header": "@@ -69,26 +71,31 @@ final class ClobStreamControl extends LOBStreamControl {", "removed": [ " if ((c >= 0x0001) && (c <= 0x007F)) {", " else if (c > 0x07FF) {", " //found char of three byte width", " if (in.skip (2) < 2) {", " streamLength += 3;", " break;", " else {", " if (in.skip (1) != 1) {", " streamLength += 2;" ] } ] } ]
derby-DERBY-2620-bff53129
DERBY-1025 / DERBY-2620 test cases Make sure statement is created with holdability CLOSE_CURSORS_AT_COMMIT when testing if xa_start commits the transaction. Exempt embedded from running DERBY-1025 test because of 08003 - No current connection git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@535973 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2631-e3bd4bbd
DERBY-2631: Expose existing auto-generated key functionality through more JDBC APIs in embedded mode. 1. Updates the following JDBC methods so that they no longer throw a "Feature not implemented" error in embedded mode. Instead they make calls to an already existing internal method and pass in the appropriate arguments: Connection.prepareStatement(String sql, String[] columnNames); Connection.prepareStatement(String sql, int[] columnIndexes); Statement.execute(String sql, String[] columNames); Statement.execute(String sql, int[] columIndexes); Statement.executeUpdate(String sql, String[] columnNames); Statement.executeUpdate(String sql, int[] columnIndexes); 2. Changes the (already existing but not currently used) code in sql/execute/InsertResultSet that handles autogen column indexes/names to throw an error for any target column that is not an auto-increment column. This is because Derby's implementation of getGeneratedKeys() internally maps to the IDENTITY_VAL_LOCAL() function, which only returns keys for identity (autoincrement) columns. So if the user specifies something else, we'll throw an error. 3. Changes the names of two existing (but unused) errors to more accurately reflect their intended use (they are now called by the changes for #2). Also changes the text for those errors as the old text seemed a tad awkward. No regression impact here because the two errors in question were never exposed to users before now. 4. Makes a slight change to jdbcapi/statementJdbc30.java (a test) to reflect the new behavior (we no longer throw "Feature not implemented" errors). 5. Adds test cases for the newly supported APIs to the JUnit test lang/AutoGenJDBC30Test.java. This involved adding two more utility methods to junit/BaseJDBCTestCase.java, as well. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@538260 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java", "hunks": [ { "added": [ "\t * @exception SQLException Thrown on error.", " \t\treturn prepareStatement(sql,", "\t\t\tJDBC20Translation.TYPE_FORWARD_ONLY,", "\t\t\tJDBC20Translation.CONCUR_READ_ONLY,", "\t\t\tconnectionHoldAbility,", "\t\t\tcolumnIndexes == null", "\t\t\t\t? JDBC30Translation.NO_GENERATED_KEYS", "\t\t\t\t: JDBC30Translation.RETURN_GENERATED_KEYS,", "\t\t\tcolumnIndexes,", "\t\t\tnull);" ], "header": "@@ -817,14 +817,22 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t * @exception SQLException Feature not implemented for now.", " \t\tthrow Util.notImplemented(\"prepareStatement(String, int[])\");" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ "\t\texecute(sql, false, true,", "\t\t\t((columnIndexes == null) || (columnIndexes.length == 0))", "\t\t\t\t? JDBC30Translation.NO_GENERATED_KEYS", "\t\t\t\t: JDBC30Translation.RETURN_GENERATED_KEYS,", "\t\t\tcolumnIndexes,", "\t\t\tnull);", "\t\treturn updateCount;" ], "header": "@@ -217,7 +217,13 @@ public class EmbedStatement extends ConnectionChild", "removed": [ " \t\tthrow Util.notImplemented(\"executeUpdate(String, int[])\");" ] }, { "added": [ "\t\texecute(sql, false, true,", "\t\t\t((columnNames == null) || (columnNames.length == 0))", "\t\t\t\t? JDBC30Translation.NO_GENERATED_KEYS", "\t\t\t\t: JDBC30Translation.RETURN_GENERATED_KEYS,", "\t\t\tnull,", "\t\t\tcolumnNames);", "\t\treturn updateCount;" ], "header": "@@ -238,7 +244,13 @@ public class EmbedStatement extends ConnectionChild", "removed": [ " \t\tthrow Util.notImplemented(\"executeUpdate(String, String[])\");" ] }, { "added": [ "\t\treturn execute(sql, false, true,", "\t\t\t((columnIndexes == null) || (columnIndexes.length == 0))", "\t\t\t\t? JDBC30Translation.NO_GENERATED_KEYS", "\t\t\t\t: JDBC30Translation.RETURN_GENERATED_KEYS,", "\t\t\tcolumnIndexes,", "\t\t\tnull);" ], "header": "@@ -631,7 +643,12 @@ public class EmbedStatement extends ConnectionChild", "removed": [ " \t\tthrow Util.notImplemented(\"execute(String, int[])\");" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/InsertResultSet.java", "hunks": [ { "added": [ "\t\t\tColumnDescriptor cd = td.getColumnDescriptor(columnIndexes[i]);", "\t\t\tif (!verifyAutoGenColumn(cd))", "\t\t\t{", "\t\t\t\tthrow StandardException.newException(", "\t\t\t\t\tSQLState.LANG_INVALID_AUTOGEN_COLUMN_POSITION,", "\t\t\t\t\tnew Integer(columnIndexes[i]), td.getName());", "\t\t\t}" ], "header": "@@ -563,8 +563,13 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet", "removed": [ "\t\t\tif (td.getColumnDescriptor(columnIndexes[i]) == null)", "\t\t\t\tthrow StandardException.newException(SQLState.LANG_COLUMN_POSITION_NOT_FOUND, new Integer(columnIndexes[i]));" ] } ] } ]
derby-DERBY-2632-af9a4394
DERBY-2632 - attempt to fix some failing upgrade tests with JSR169 by preventing them from running. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@536598 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-2635-e257b0e2
DERBY-2635: intermittent failure in T_RawStoreFactory unit test Disabled the interrupter thread which sometimes caused stray interrupts in sane builds. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@537409 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/unitTests/store/T_Util.java", "hunks": [ { "added": [ "\t * Check that it's not possible to get a page which is already latched by", "\t * the same transaction." ], "header": "@@ -1132,7 +1132,8 @@ public class T_Util", "removed": [ "\t * Check that it's not possible to get a page which is latched." ] }, { "added": [ "\t\tif (!SanityManager.DEBUG) {", "\t\t\t// don't run the interrupter thread in sane builds, since getPage()", "\t\t\t// will throw an assert error instead of hanging (DERBY-2635)", "\t\t\tinterrupter.start();", "\t\t}", "\t\t\t// expect thread interrupted exception in insane builds", "\t\t\tif (SanityManager.DEBUG || !se.getMessageId().equals(\"08000\")) {" ], "header": "@@ -1152,14 +1153,18 @@ public class T_Util", "removed": [ "\t\tinterrupter.start();", "\t\t\t// expect thread interrupted exception", "\t\t\tif (!se.getMessageId().equals(\"08000\")) {" ] }, { "added": [ "\t\t\tif (interrupter.isAlive()) {", "\t\t\t\tinterrupter.join();", "\t\t\t}" ], "header": "@@ -1175,7 +1180,9 @@ public class T_Util", "removed": [ "\t\t\tinterrupter.join();" ] } ] } ]
derby-DERBY-2653-aadfc186
DERBY-2653 (partial)Expose existing auto-generated key functionality through more JDBC APIs in Derby Client. Enables API's that take columnNames. Connection.prepareStatement(String sql, String[] columnNames); Statement.execute(String sql, String[] columNames); Statement.executeUpdate(String sql, String[] columnNames); To get generated keys, user must specify an array of one column name, which should be the identity column. Client actually ignores the contents at this time. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@629171 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/Connection.java", "hunks": [ { "added": [], "header": "@@ -1634,7 +1634,6 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " checkAutoGeneratedKeysParameters(autoGeneratedKeys, columnNames);" ] }, { "added": [ " int genKeys = (columnNames == null ||", " columnNames.length == 0", " ? Statement.NO_GENERATED_KEYS: ", " Statement.RETURN_GENERATED_KEYS);", " genKeys," ], "header": "@@ -1740,11 +1739,15 @@ public abstract class Connection implements java.sql.Connection,", "removed": [ " java.sql.Statement.RETURN_GENERATED_KEYS," ] } ] }, { "file": "java/client/org/apache/derby/client/am/Statement.java", "hunks": [ { "added": [ " if (columnNames != null && columnNames.length > 0)", " autoGeneratedKeys_ = Statement.RETURN_GENERATED_KEYS;" ], "header": "@@ -1211,6 +1211,8 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [] }, { "added": [ " if (columnNames != null && columnNames.length > 0)", " autoGeneratedKeys_ = Statement.RETURN_GENERATED_KEYS;" ], "header": "@@ -1270,6 +1272,8 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [] }, { "added": [ " checkForClosedStatement(); // Per jdbc spec (see java.sql.Statement.close() javadoc) ", " checkAutoGeneratedKeysParameters();" ], "header": "@@ -1854,12 +1858,11 @@ public class Statement implements java.sql.Statement, StatementCallbackInterface", "removed": [ " checkForClosedStatement(); // Per jdbc spec (see java.sql.Statement.close() javadoc)", " checkAutoGeneratedKeysParameters();", "" ] } ] } ]