Columns: id (string, length 22-25), commit_message (string, length 137-6.96k), diffs (list, length 0-63)
derby-DERBY-4677-012a72b2
DERBY-4677 We were not transferring unique nullable properties from the system catalog to the store layer during compress table and bulk insert. Because of that, after those operations, we started allowing duplicate rows, which caused db corruption. With this checkin, we transfer unique nullable properties from the SQL layer to the store layer. The system catalogs have the correct information, but the unique nullability information was not getting transferred to the store during btree recreation for compress table and bulk insert operations. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@954544 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/InsertResultSet.java", "hunks": [ { "added": [ "\t\t\tif(cd.getIndexDescriptor().isUniqueWithDuplicateNulls())", "\t\t\t{", "\t\t\t\tproperties.put(", "\t \"uniqueWithDuplicateNulls\", Boolean.toString(true));", "\t\t\t}" ], "header": "@@ -1869,6 +1869,11 @@ class InsertResultSet extends DMLWriteResultSet implements TargetResultSet", "removed": [] } ] } ]
derby-DERBY-4679-125f9182
DERBY-4679 Several left outer joins causes unstable query with incorrect results Patch derby-4679b, which solves the following problem: When transitive closure generates new criteria into the query, it is sometimes confused by situations where the same column name appears in a result column list multiple times due to flattening of sub-queries. Flattening requires remapping of (table, column) numbers in column references. In cases where the same column name appears in a result column list multiple times, this led to remapping (reassigning) wrong (table, column) numbers to column references in join predicates transformed to where clauses as a result of the flattening. See also DERBY-2526 and DERBY-3023, whose fixes were partial solutions to the problem of wrong column number remappings confusing the transitive closure of search predicates performed by the preprocessing step of the optimizer. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@952237 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ColumnReference.java", "hunks": [ { "added": [ " /* For remembering original (tn,cn) of this CR during join flattening. */", " private int tableNumberBeforeFlattening = -1;", " private int columnNumberBeforeFlattening = -1;", "" ], "header": "@@ -71,6 +71,10 @@ public class ColumnReference extends ValueNode", "removed": [] }, { "added": [], "header": "@@ -850,12 +854,6 @@ public class ColumnReference extends ValueNode", "removed": [ "\t\t\t\ttableNumber = ft.getTableNumber();", "\t\t\t\tif (SanityManager.DEBUG)", "\t\t\t\t{", "\t\t\t\t\tSanityManager.ASSERT(tableNumber != -1,", "\t\t\t\t\t\t\"tableNumber not expected to be -1\");", "\t\t\t\t}" ] } ] } ]
derby-DERBY-4679-4ce669e5
DERBY-4679 Several left outer joins causes unstable query with incorrect results Follow-up patch derby-4679-followup, which makes the original patch safer by also matching the column name once a candidate result column has been located using the table number and column number pair to match an RC. This is to safe-guard against false matches, since DERBY-4595 shows that the column number can be wrong in certain situations. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@957260 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumnList.java", "hunks": [ { "added": [ " * <p/>", " * {@code columnName} is used to assert that we find the right column.", " * If we found a match on (tn, cn) but columnName is wrong, return null.", " * Once we trust table numbers and column numbers to always be correct,", " * cf. DERBY-4695, we can remove this parameter.", " * @param columnName name of the desired column", " public ResultColumn getResultColumn(int tableNumber,", " int columnNumber,", " String columnName)" ], "header": "@@ -321,11 +321,19 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ " public ResultColumn getResultColumn(int tableNumber, int columnNumber)" ] }, { "added": [ " // Found matching (t,c) within this top", " // resultColumn. Now do sanity check that column", " // name is correct. Remove when DERBY-4695 is", " // fixed.", " if (columnName.equals(", " vcn.getSourceColumn().getName())) {", " resultColumn.setReferenced();", " return resultColumn;", " } else {", " if (SanityManager.DEBUG) {", " SanityManager.ASSERT(", " false,", " \"wrong (tn,cn) for column \" +", " columnName +", " \" found: this pair points to \" +", " vcn.getSourceColumn().getName());", " }", " // Fall back on column name based lookup,", " // cf. DERBY-4679. See ColumnReference#", " // remapColumnReferencesToExpressions", " return null;", " }" ], "header": "@@ -347,10 +355,28 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ " // Found matching (t,c) within this top resultColumn", " resultColumn.setReferenced();", " return resultColumn;", "" ] } ] } ]
derby-DERBY-4679-b26c1ca2
DERBY-4679 Several left outer joins causes unstable query with incorrect results Follow-up patch derby-4679-2a, which makes the new (tn, cn) based remapping work also for a CR to a subquery join participant being rebound after flattening, see detailed comments in the code. Extra test cases are added to JoinTest#testDerby_4679 git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@958618 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumnList.java", "hunks": [ { "added": [ " * cf. DERBY-4695, we could remove this parameter." ], "header": "@@ -325,7 +325,7 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ " * cf. DERBY-4695, we can remove this parameter." ] }, { "added": [ " if (ft.getTableNumber() == tableNumber) {", " // We have the right table, now try to match the", " // column number. Looking at a join, for a base", " // table participant, we will find the correct", " // column position in the", " // JOIN's ColumnDescriptor. Normally, we could just", " // call rc.getColumnPosition, but this doesn't work", " // if we have a join with a subquery participant", " // (it would give us the virtualColumnId one level", " // too high up, since the column descriptor is null", " // in that case inside a JOIN's RC.", " //", " // If FromTable is a FromSubquery we need to look", " // at the JOIN RC's source column to match the", " // table column number. However, at that level, the", " // table number would be that of the underlying", " // SELECT (for example), rather than the", " // FromSubquery's, so we need to match the table", " // number one level above, cf the test cases in", " // JoinTest#testDerby_4679 which have subqueries.", "", " ColumnDescriptor cd = rc.getTableColumnDescriptor();", "", " if (SanityManager.DEBUG) {", " SanityManager.ASSERT(", " cd != null || ft instanceof FromSubquery);", " }", "", " if ( (cd != null && cd.getPosition() ==", " columnNumber) ||", " (vcn.getSourceColumn().getColumnPosition() ==", " columnNumber) ) {", "", " // Found matching (t,c) within this top", " // resultColumn. Now do sanity check that column", " // name is correct. Remove when DERBY-4695 is", " // fixed.", " if (columnName.equals(", " vcn.getSourceColumn().getName())) {", " resultColumn.setReferenced();", " return resultColumn;", " } else {", " if (SanityManager.DEBUG) {", " SanityManager.ASSERT(", " false,", " \"wrong (tn,cn) for column \" +", " columnName +", " \" found: this pair points to \" +", " vcn.getSourceColumn().getName());", " }", " // Fall back on column name based lookup,", " // cf. DERBY-4679. See ColumnReference#", " // remapColumnReferencesToExpressions", " return null;", " } else {", " rc = vcn.getSourceColumn();" ], "header": "@@ -352,30 +352,63 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ " if (ft.getTableNumber() == tableNumber &&", " rc.getColumnPosition() == columnNumber) {", "", " // Found matching (t,c) within this top", " // resultColumn. Now do sanity check that column", " // name is correct. Remove when DERBY-4695 is", " // fixed.", " if (columnName.equals(", " vcn.getSourceColumn().getName())) {", " resultColumn.setReferenced();", " return resultColumn;", " } else {", " if (SanityManager.DEBUG) {", " SanityManager.ASSERT(", " false,", " \"wrong (tn,cn) for column \" +", " columnName +", " \" found: this pair points to \" +", " vcn.getSourceColumn().getName());", " // Fall back on column name based lookup,", " // cf. DERBY-4679. See ColumnReference#", " // remapColumnReferencesToExpressions", " return null;" ] } ] } ]
derby-DERBY-4684-1b3f93b3
DERBY-4684: Fix implicit casts to BOOLEAN. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@951047 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/SQLBoolean.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.SQLState;" ], "header": "@@ -30,6 +30,7 @@ import org.apache.derby.iapi.services.io.Storable;", "removed": [] } ] } ]
derby-DERBY-4685-20cc6f64
DERBY-4685: Dead/unreachable code in OpenConglomerate.lockPositionForWrite() Remove the second parameter (forInsert) in lockPositionForWrite() because it is always false in the current code, and because the method is not believed to work for inserts. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1028712 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/access/conglomerate/GenericConglomerateController.java", "hunks": [ { "added": [ " open_conglom.lockPositionForWrite(pos, true);" ], "header": "@@ -176,7 +176,7 @@ public abstract class GenericConglomerateController", "removed": [ " open_conglom.lockPositionForWrite(pos, false /* not an insert */, true);" ] }, { "added": [ " open_conglom.lockPositionForWrite(pos, true);" ], "header": "@@ -275,8 +275,7 @@ public abstract class GenericConglomerateController", "removed": [ " open_conglom.lockPositionForWrite(", " pos, false /* not an insert */, true);" ] }, { "added": [ " open_conglom.lockPositionForWrite(pos, waitForLock);" ], "header": "@@ -380,8 +379,7 @@ public abstract class GenericConglomerateController", "removed": [ " open_conglom.lockPositionForWrite(", " pos, false /* not an insert */, waitForLock);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/access/conglomerate/OpenConglomerate.java", "hunks": [ { "added": [], "header": "@@ -521,7 +521,6 @@ public abstract class OpenConglomerate", "removed": [ " boolean forInsert," ] }, { "added": [ " // This method is only used for locking existing rows, never for", " // insert operations.", " final boolean forInsert = false;", "" ], "header": "@@ -563,6 +562,10 @@ public abstract class OpenConglomerate", "removed": [] } ] } ]
derby-DERBY-4686-33288d68
DERBY-4686: SQLBinary.writeBlob is inefficient, reading one byte at a time from the source BLOB Improves performance by reading and writing from/to a transfer buffer. Patch contributed by Yun Lee (yun dot lee dot bj at gmail dot com). Patch file: DERBY-4686-1.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@954748 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/SQLBinary.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.io.DerbyIOException;" ], "header": "@@ -31,6 +31,7 @@ import org.apache.derby.iapi.types.DataValueDescriptor;", "removed": [] }, { "added": [ " private static final int LEN_OF_BUFFER_TO_WRITE_BLOB = 1024;", "" ], "header": "@@ -101,6 +102,8 @@ abstract class SQLBinary", "removed": [] }, { "added": [ " ", " int bytesRead = 0;", " int numOfBytes = 0;", " byte[] buffer = new byte[LEN_OF_BUFFER_TO_WRITE_BLOB];", " ", " while(bytesRead < len) {", " numOfBytes = is.read(buffer);", " ", " if (numOfBytes == -1) {", " throw new DerbyIOException(", " MessageService.getTextMessage(", " SQLState.SET_STREAM_INEXACT_LENGTH_DATA),", " SQLState.SET_STREAM_INEXACT_LENGTH_DATA);", " }", " ", " out.write(buffer, 0, numOfBytes);", " bytesRead += numOfBytes; " ], "header": "@@ -368,10 +371,23 @@ abstract class SQLBinary", "removed": [ "", " for ( int i = 0; i < len; i++ )", " {", " out.write( is.read() );" ] } ] } ]
derby-DERBY-4692-6ad8107b
DERBY-4692: Forbid UNIONs between BOOLEAN and non-BOOLEAN types, per the SQL Standard. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@952263 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ResultColumnList.java", "hunks": [ { "added": [ "\t\t\t** Check type compatability.", "\t\t\tif ( !unionCompatible( thisExpr, otherExpr ) )" ], "header": "@@ -2373,14 +2373,10 @@ public class ResultColumnList extends QueryTreeNodeVector", "removed": [ "\t\t\t** Check type compatability. We want to make sure that", "\t\t\t** the types are assignable in either direction", "\t\t\t** and they are comparable.", "\t\t\tif (", "\t\t\t\t!thisExpr.getTypeCompiler().storable(otherTypeId, cf) &&", "\t\t\t\t!otherExpr.getTypeCompiler().storable(thisTypeId, cf))" ] } ] } ]
derby-DERBY-4693-3c3fb736
DERBY-4693: RENAME COLUMN loses IDENTITY attributes This change modifies the RENAME COLUMN logic so that it preserves the autoincrement properties of the column when renaming it. Since RENAME COLUMN more-or-less drops and re-adds the column, we need to ensure that when the column is re-added, if it is an Autoincrement column, we set the CREATE_AUTOINCREMENT flag that the parser sets when initially creating the table. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@954344 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/RenameConstantAction.java", "hunks": [ { "added": [ "import org.apache.derby.impl.sql.compile.ColumnDefinitionNode;" ], "header": "@@ -50,6 +50,7 @@ import org.apache.derby.catalog.UUID;", "removed": [] } ] } ]
derby-DERBY-4694-65213c4f
DERBY-4694: Build breaks on Mac OS X due to JDK classpath issues Relaxed verification criteria for Apple JDKs (skip check for Headers directory). Added flag -DprintCompilerPropertiesVerbose to aid debugging when the property setter fails to set the correct compile classpath(s). Patch file: derby-4694-2b-debugging_and_mac_fix.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@963206 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyPreBuild/PropertySetter.java", "hunks": [ { "added": [], "header": "@@ -123,10 +123,8 @@ public class PropertySetter extends Task", "removed": [ " private static final String APPLE_JAVA_ROOT = \"/System/Library/Frameworks/JavaVM.framework/Versions\";", " private static final String APPLE_HEADERS_DIR = \"Headers\";" ] }, { "added": [ " /** Property controlling extra verbose debugging information. */", " private static final String PROPERTY_SETTER_VERBOSE_DEBUG_FLAG =", " \"printCompilerPropertiesVerbose\";", " private static boolean VERBOSE_DEBUG_ENABLED;" ], "header": "@@ -134,6 +132,10 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " verbose(\"jdkParent derived from '\" + javaHome + \"': '\" +", " ancestor.getPath() + \"'\");" ], "header": "@@ -539,7 +541,8 @@ public class PropertySetter extends Task", "removed": [ " " ] }, { "added": [ " debug(\"WARNING: No JDK parent directories specified.\");", " debug(\"\\nLocating Apple JDKs:\");", " verbose(\"locating JDKs in '\" + jdkParentDirectory + \"'\");", " verbose(\"checking root '\" + f + \"'\");" ], "header": "@@ -601,22 +604,24 @@ public class PropertySetter extends Task", "removed": [ " debug(\"\\nLocating JDKs:\");", " new File(f, APPLE_HEADERS_DIR)," ] }, { "added": [ " verbose(\"located \" + jdks.size() + \" JDKs in total\");" ], "header": "@@ -663,6 +668,7 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " verbose(\"no manifest found for JDK in '\" + jdkHome + \"'\");" ], "header": "@@ -747,6 +753,7 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " debug(\"No candidate JDKs (version '\" + specificationVersion + \"')\");" ], "header": "@@ -775,6 +782,7 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ "", " // Set the verbose debugging flag, it is used by static methods.", " VERBOSE_DEBUG_ENABLED = Boolean.valueOf((String)", " _propertiesSnapshot.get(PROPERTY_SETTER_VERBOSE_DEBUG_FLAG)", " ).booleanValue();" ], "header": "@@ -1085,6 +1093,11 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " if (isSet(PROPERTY_SETTER_DEBUG_FLAG) ||", " VERBOSE_DEBUG_ENABLED) {", " /**", " * Emits a debug message to the console if verbose debugging is enabled.", " * <p>", " * Verbose debugging is controlled by", " * {@linkplain #PROPERTY_SETTER_VERBOSE_DEBUG_FLAG}.", " *", " * @param msg the message to print", " */", " private static void verbose(CharSequence msg) {", " if (VERBOSE_DEBUG_ENABLED) {", " System.out.println(\"[verbose] \" + msg);", " }", " }", "" ], "header": "@@ -1309,11 +1322,26 @@ public class PropertySetter extends Task", "removed": [ " if (isSet(PROPERTY_SETTER_DEBUG_FLAG)) {" ] }, { "added": [ " verbose((accept ? \"candidate\" : \"duplicate\") + \" '\" +", " pathname + \"' -> '\" + canonicalRoot + \"'\");", " verbose(\"file operation failed: \" + ioe.getMessage());" ], "header": "@@ -1334,9 +1362,12 @@ public class PropertySetter extends Task", "removed": [] } ] } ]
derby-DERBY-4694-ee6ea33e
DERBY-4694: Build breaks on OS X due to JDK classpath issues. Enabled JAR inspection for Mac OS X instead of using hard-coded values. The motivation for the change is that Apple has started symlinking the directories for JDK 1.4 and JDK 1.5 to JDK 1.6. This made the build fail on OS X (only recent fresh installs, where the symlinking is used). Patch file: derby-4694-1b.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@958555 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyPreBuild/PropertySetter.java", "hunks": [ { "added": [], "header": "@@ -23,7 +23,6 @@ package org.apache.derbyPreBuild;", "removed": [ "import java.io.FilenameFilter;" ] }, { "added": [ " private static final String APPLE_CLASSES_DIR = \"Classes\";", " private static final String APPLE_COMMANDS_DIR = \"Commands\";", " private static final String APPLE_HEADERS_DIR = \"Headers\";", " private static final String APPLE_HOME_DIR = \"Home\";", " private static final String APPLE_LIB_DIR = \"Libraries\";", " private static final String APPLE_RESOURCES_DIR = \"Resources\";" ], "header": "@@ -125,6 +124,12 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " throws Exception", " String default_j14lib = getProperty( J14LIB );", " String default_j15lib = getProperty( J15LIB );", " String default_j16lib = getProperty( J16LIB );", "", " // Obtain a list of all JDKs available to us, then specify which one to", " // use for the different versions we require.", " List<JDKInfo> jdks = locateAppleJDKs(getJdkSearchPath());", " debug(\"\\nSelecting JDK candidates:\");", " if (default_j14lib == null) {", " default_j14lib = getJreLib(jdks, \"1.4\", jdkVendor);", " }", " if (default_j15lib == null) {", " default_j15lib = getJreLib(jdks, \"1.5\", jdkVendor);", " }", " if (default_j16lib == null) {", " default_j16lib = getJreLib(jdks, \"1.6\", jdkVendor);", " }", "", " defaultSetter(default_j14lib, default_j15lib, default_j16lib);" ], "header": "@@ -350,9 +355,27 @@ public class PropertySetter extends Task", "removed": [ " throws BuildException", " defaultSetter( APPLE_JAVA_ROOT + \"/1.4/Classes\", APPLE_JAVA_ROOT + \"/1.5/Classes\", APPLE_JAVA_ROOT + \"/1.6/Classes\" );" ] }, { "added": [ " List<JDKInfo> jdks = locateMostJDKs(getJdkSearchPath());" ], "header": "@@ -413,7 +436,7 @@ public class PropertySetter extends Task", "removed": [ " List<JDKInfo> jdks = locateJDKs(getJdkSearchPath());" ] }, { "added": [ " //", " private List<JDKInfo> locateAppleJDKs(List<File> jdkParentDirectories) {", " ArrayList<JDKInfo> jdks = new ArrayList<JDKInfo>();", " if (jdkParentDirectories == null) {", " return jdks;", " }", "", " debug(\"\\nLocating JDKs:\");", "", " final FileFilter jdkFilter = new JDKRootFileFilter();", " for (File jdkParentDirectory : jdkParentDirectories) {", " // Limit the search to the directories in the parent directory.", " // Don't descend into sub directories.", " File[] possibleJdkRoots = jdkParentDirectory.listFiles(jdkFilter);", " for (File f : possibleJdkRoots) {", "", " File[] requiredDirs = new File[] {", " new File(f, APPLE_CLASSES_DIR),", " new File(f, APPLE_COMMANDS_DIR),", " new File(f, APPLE_HEADERS_DIR),", " new File(f, APPLE_HOME_DIR),", " new File(f, APPLE_LIB_DIR),", " new File(f, APPLE_RESOURCES_DIR)", " };", " ", " boolean dirsOK = true;", " for (File reqDir : requiredDirs) {", " if (!reqDir.exists()) {", " debug(\"Missing JDK directory: \" +", " reqDir.getAbsolutePath());", " dirsOK = false;", " break;", " }", " }", " if (!dirsOK) {", " continue;", " }", "", " File rtArchive = new File(f,", " new File(APPLE_CLASSES_DIR, \"classes.jar\").getPath());", " if (!rtArchive.exists()) {", " debug(\"Missing JAR: \" + rtArchive);", " // Bail out, we only understand JDKs that have a", " // \"Classes/classes.jar\".", " continue;", " }", " // Get implementation version from the manifest.", " Manifest mf;", " try {", " JarFile rtJar = new JarFile(rtArchive);", " mf = rtJar.getManifest();", " } catch (IOException ioeIgnored) {", " // 
Obtaining the manifest failed for some reason.", " // If in debug mode, let the user know.", " debug(\"Failed to obtain manifest for \" +", " rtArchive.getAbsolutePath() + \": \" +", " ioeIgnored.getMessage());", " continue;", " }", " JDKInfo jdk = inspectJarManifest(mf, f);", " if (jdk != null) {", " jdks.add(jdk);", " continue;", " }", " }", " }", " return jdks;", " }" ], "header": "@@ -574,6 +597,74 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " private List<JDKInfo> locateMostJDKs(List<File> jdkParentDirectories) {" ], "header": "@@ -582,7 +673,7 @@ public class PropertySetter extends Task", "removed": [ " private List<JDKInfo> locateJDKs(List<File> jdkParentDirectories) {" ] }, { "added": [ " final FileFilter jdkFilter = new JDKRootFileFilter();", " File[] possibleJdkRoots = jdkParentDirectory.listFiles(jdkFilter);" ], "header": "@@ -597,17 +688,11 @@ public class PropertySetter extends Task", "removed": [ " File[] possibleJdkRoots = jdkParentDirectory.listFiles(", " new FileFilter() {", "", " /** Accepts only directories. */", " public boolean accept(File pathname) {", " return pathname.isDirectory();", " }", " });" ] }, { "added": [ " debug(\"found JDK: \" + info);", " return info;" ], "header": "@@ -669,23 +754,8 @@ public class PropertySetter extends Task", "removed": [ " if (!info.implementationVersion.equals(JDKInfo.UNKNOWN)) {", " // Make sure we have javac", " File jdkBin = new File(jdkHome, \"bin\");", " File[] javac = jdkBin.listFiles(new FilenameFilter() {", "", " public boolean accept(File dir, String name) {", " return name.toLowerCase().startsWith(\"javac\");", " }", " });", " if (javac == null || javac.length == 0) {", " return null;", " }", " //javac located, we're good to go.", " debug(\"found JDK: \" + info);", " return info;", " }", " return null;" ] }, { "added": [ " if (jdks == null || jdks.isEmpty()) {" ], "header": "@@ -704,10 +774,9 @@ public class PropertySetter extends Task", "removed": [ " if (jdks == null || jdks.size() == 0) {", " final String jreLib = new File(\"jre\", \"lib\").getPath();" ] }, { "added": [ " if (candidates.isEmpty()) {" ], "header": "@@ -723,7 +792,7 @@ public class PropertySetter extends Task", "removed": [ " if (candidates.size() == 0) {" ] }, { "added": [ " return constructJreLibPath(jdk).getAbsolutePath();" ], "header": "@@ -749,7 +818,7 @@ public class PropertySetter extends Task", "removed": [ " return new File(jdk.path, jreLib).getAbsolutePath();" ] }, { "added": [ " /**", " * Constructs the path to the JRE library directory for the given JDK.", " *", " * @param jdk the target JDK", " * @return A <tt>File</tt> object pointing to the JRE library directory.", " */", " private static File constructJreLibPath(JDKInfo jdk) {", " String relLib;", " if (jdk.vendor.startsWith(JDK_APPLE)) {", " relLib = new File(APPLE_CLASSES_DIR).getPath();", " } else {", " relLib = new File(\"jre\", \"lib\").getPath();", " }", " return new File(jdk.path, relLib);", " }", "" ], "header": "@@ -757,6 +826,22 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " private boolean isValidVersion(String implVersion,", " debug(\"JDK ignored, no impl version found\");", " debug(\"JDK with version '\" + implVersion + \"' ignored: \" +", " \"early access\");" ], "header": "@@ -765,15 +850,18 @@ public class PropertySetter extends Task", "removed": [ " private static boolean isValidVersion(String implVersion," ] }, { "added": [ " // Normalize the vendore names returned by Apple JDKs.", " if 
(vendorName.equals(\"Apple Inc.\")) {", " // The running VM says \"Apple Inc.\", the JAR manifest says", " // \"Apple Computer, Inc.\".", " vendorName = \"Apple Computer, Inc.\";", " }", " // The vendor name specified in the jar file manifest differes from the", " // one return by the JVM itself for the Sun JDKs. For instance:" ], "header": "@@ -820,10 +908,14 @@ public class PropertySetter extends Task", "removed": [ " // Currently we only replace commas with the empty string. The reason", " // for doing this is that the vendor name specified in the jar file", " // manifest differes from the one return by the JVM itself for the Sun", " // JDKs. For instance:" ] }, { "added": [ " /**", " * A custom filter that accepts only directories and which in addition tries", " * to ignore duplicates (i.e. symbolic links pointing into the same", " * directory).", " */", " private static class JDKRootFileFilter", " implements FileFilter {", "", " private List<String> canonicalRoots = new ArrayList<String>();", "", " /** Accepts only directories. */", " public boolean accept(File pathname) {", " if (pathname.isDirectory()) {", " // Avoid processing the same JDK multiple times if possible.", " try {", " String canonicalRoot = pathname.getCanonicalPath();", " boolean accept = !canonicalRoots.contains(canonicalRoot);", " if (accept) {", " canonicalRoots.add(canonicalRoot);", " }", " return accept;", " } catch (IOException ioe) {", " // Ignore exception, just accept the directory.", " return true;", " }", " }", " return false;", " }", " }", "}" ], "header": "@@ -1221,5 +1313,34 @@ public class PropertySetter extends Task", "removed": [ "}" ] } ] } ]
derby-DERBY-4695-ee21e7c1
DERBY-4695 Internal assignment of tablenumber, columnnumber looks wrong in query tree, although no ill effects are seen Patch derby-4695-fixcolumnno-1b, which moves the assignment of ColumnReference#columnnumber from ColumnReference to the underlying FromTable, i.e. the same place as where the table number is assigned. This ensures that the two are in synch, and the columnNumber assignment is no longer thwarted by the (re)assignment of the ColumnReference#source for "JoinNode"s to one of the result columns of the Join itself (needed for code generation: the source for the ColumnReference needs to point to the correct ResultSet at generate time, cf. the comment in JoinNode#getMatchingColumn). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@988204 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ColumnReference.java", "hunks": [ { "added": [ "", " /**", " * The FromTable this column reference is bound to.", " */", " private int tableNumber;", "", " /**", " * The column number in the underlying FromTable. But note {@code source}.", " * @see #source", " */", " private int columnNumber;", "", " /**", " * This is where the value for this column reference will be coming from.", " * Note that for join nodes, {@code tableNumber}/{@code columnNumber} will", " * point to the column in the left or right join participant {@code", " * FromTable}, whereas {@code source} will be bound to the RC in the result", " * column list of the join node. See also the comment at the end of", " * JoinNode#getMatchingColumn.", " * @see JoinNode#getMatchingColumn", " * @see #columnNumber", " * @see #tableNumber", " */", " private ResultColumn source;" ], "header": "@@ -58,12 +58,30 @@ public class ColumnReference extends ValueNode", "removed": [ "\t/* The table this column reference is bound to */", "\tprivate int\t\t\ttableNumber;\t", "\t/* The column number in the underlying base table */", "\tprivate int\t\t\tcolumnNumber;\t", "\t/* This is where the value for this column reference will be coming from */", "\tprivate ResultColumn\tsource;" ] } ] } ]
derby-DERBY-4698-2e7e8f6d
DERBY-4698 Simple query with HAVING clause crashes with NullPointerException Patch derby-4698-2. The case of column references in HAVING clauses being wrong after JOIN flattening was initially solved by DERBY-3880. That solution was partial in that the fix-up could sometimes happen too late. This patch changes the fix-up of column references in a HAVING clause after join flattening so that it happens at the same point in time as the fix-up of the other column references affected by the flattening (the result column list, and column references in join predicates and group by clauses). Thus the fix-up is moved from the modifyaccesspath phase to the preprocess phase. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@956234 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromList.java", "hunks": [ { "added": [ " * @param havingClause The HAVING clause, if any", " GroupByList gbl,", " ValueNode havingClause)" ], "header": "@@ -708,13 +708,15 @@ public class FromList extends QueryTreeNodeVector implements OptimizableList", "removed": [ "\t\t\t\t\t\t\t\t GroupByList gbl)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/FromSubquery.java", "hunks": [ { "added": [ " * @param havingClause The HAVING clause, if any" ], "header": "@@ -483,6 +483,7 @@ public class FromSubquery extends FromTable", "removed": [] }, { "added": [ " GroupByList gbl,", " ValueNode havingClause)" ], "header": "@@ -491,7 +492,8 @@ public class FromSubquery extends FromTable", "removed": [ "\t\t\t\t\t\t\tGroupByList gbl)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/FromTable.java", "hunks": [ { "added": [ " * @param havingClause The HAVING clause, if any" ], "header": "@@ -1420,6 +1420,7 @@ abstract class FromTable extends ResultSetNode implements Optimizable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/JoinNode.java", "hunks": [ { "added": [ " * @param havingClause The HAVING clause, if any" ], "header": "@@ -1414,6 +1414,7 @@ public class JoinNode extends TableOperatorNode", "removed": [] }, { "added": [ " GroupByList gbl,", " ValueNode havingClause)" ], "header": "@@ -1422,7 +1423,8 @@ public class JoinNode extends TableOperatorNode", "removed": [ "\t\t\t\t\t\t\tGroupByList gbl)" ] } ] } ]
derby-DERBY-4699-a8132ce4
DERBY-3989 / DERBY-4699 Made PropertySetter ignore Java 6 libraries if a Java 5 compiler is used. If j16lib is specified explicitly in such an environment, the build will be aborted (an error message will be displayed to the user). Patch file: derby-3989-02-aa-dontUseJava6LibsWithJava5Compiler.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@954421 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/build/org/apache/derbyPreBuild/PropertySetter.java", "hunks": [ { "added": [ " if ( j14lib != null ) {", " debug(\"'j14lib' explicitly set to '\" + j14lib + \"'\");", " setClasspathFromLib(J14CLASSPATH, j14lib, true );", " }", " if ( j15lib != null ) {", " debug(\"'j15lib' explicitly set to '\" + j15lib + \"'\");", " setClasspathFromLib(J15CLASSPATH, j15lib, true );", " }", " if ( j16lib != null ) {", " debug(\"'j16lib' explicitly set to '\" + j16lib + \"'\");", " setClasspathFromLib(J16CLASSPATH, j16lib, true );", " }" ], "header": "@@ -274,9 +274,18 @@ public class PropertySetter extends Task", "removed": [ " if ( j14lib != null ) { setClasspathFromLib(J14CLASSPATH, j14lib, true ); }", " if ( j15lib != null ) { setClasspathFromLib(J15CLASSPATH, j15lib, true ); }", " if ( j16lib != null ) { setClasspathFromLib(J16CLASSPATH, j16lib, true ); }" ] }, { "added": [ " debug(\"\\nSelecting JDK candidates:\");" ], "header": "@@ -405,6 +414,7 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " debug(\"\\nLocating JDKs:\");" ], "header": "@@ -577,6 +587,7 @@ public class PropertySetter extends Task", "removed": [] }, { "added": [ " debug(\"Candidate JDK for specification version \" +" ], "header": "@@ -733,7 +744,7 @@ public class PropertySetter extends Task", "removed": [ " debug(\"Chosen JDK for specification version \" +" ] }, { "added": [ " \"\\nThe build raises version mismatch errors when using a \" +", " \"Java 5 compiler with Java 6 libraries.\\n\" +", " \"Please either use a Java 6 (or later) compiler or do not \" +", " \"set the '\" + J16CLASSPATH + \"' and '\" + J16LIB +", " \"' variables.\\n\"" ], "header": "@@ -998,8 +1009,11 @@ public class PropertySetter extends Task", "removed": [ " \"\\nThe build raises version mismatch errors when using the IBM Java 5 compiler with Java 6 libraries.\\n\" +", " \"Please either use a Java 6 (or later) compiler or do not set the '\" + J16CLASSPATH + \"' and '\" + J16LIB + \"' variables.\\n\"" ] }, { "added": [ " // A Java 5 compiler raises version mismatch errors when used", " // with Java 6 libraries.", " return ( javaVersion.startsWith( JAVA_5 ) &&", " J16CLASSPATH.equals( property ) );" ], "header": "@@ -1013,13 +1027,13 @@ public class PropertySetter extends Task", "removed": [ " // The IBM Java 5 compiler raises version mismatch errors when used", " // with the IBM Java 6 libraries.", " String jdkVendor = getProperty( JDK_VENDOR );", " return ( usingIBMjdk( jdkVendor ) && javaVersion.startsWith( JAVA_5 ) && J16CLASSPATH.equals( property ) );" ] } ] } ]
derby-DERBY-47-028077eb
DERBY-47 (partial): Patch to implement execution-time "probing" given a probe predicate "place-holder" and a list of IN values. This patch creates a new execution-time result set, MultiProbeTableScanResultSet, to perform the probing. At a higher level, the changes in this patch make it so that repeated calls to MultiProbeTableScanResultSet.getNextRowCore() will first return all rows matching probeValues[0], then all rows matching probeValues[1], and so on (duplicate probe values are ignored). Once all matching rows for all values in probeValues have been returned, the call to getNextRowCore() will return null, thereby ending the scan. In order to accommodate the above behavior, the following changes were made to existing files: 1 - Add correct instantiation logic to the "getMultiProbeTableScanResultSet()" method of GenericResultSetFactory, which was just a stub method before this patch. 2 - Overloaded methods in TableScanResultSet to allow the passing of a "probe value" into the openScanController() and reopenScanController() methods. The methods then use the probe value (if one exists) as the start/stop key for positioning a scan, instead of using the start/stop key passed into the result set constructor. 3 - Made the iapi.types.DataType class implement the java.lang.Comparable interface for the sake of easy sorting (just let the JVM do the sort). Since DataType (the superclass of all datatypes and base implementation of the DataValueDescriptor interface) already has a "compare()" method that returns an integer to indicate less than, greater than, or equal, all we have to do is wrap that method inside a "compareTo()" method and we're done. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@517470 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/DataType.java", "hunks": [ { "added": [ "import java.lang.Comparable;" ], "header": "@@ -35,6 +35,7 @@ import org.apache.derby.iapi.services.sanity.SanityManager;", "removed": [] }, { "added": [ "\timplements DataValueDescriptor, CloneableObject, Comparable" ], "header": "@@ -64,7 +65,7 @@ import java.util.Calendar;", "removed": [ "\timplements DataValueDescriptor, CloneableObject" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/GenericResultSetFactory.java", "hunks": [ { "added": [ "\t\t\t\t\t\t\t\t\tActivation activation," ], "header": "@@ -740,7 +740,7 @@ public class GenericResultSetFactory implements ResultSetFactory", "removed": [ " \t\t\tActivation activation," ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/TableScanResultSet.java", "hunks": [ { "added": [ "\t{", "\t\topenScanController(tc, (DataValueDescriptor)null);", "\t}", "", "\t/*", "\t** Does the work of openScanController.", "\t**", "\t** @param tc transaction controller; will open one if null.", "\t** @param probeValue If non-null then we will open the scan controller", "\t** and position it using the received probeValue as the start key.", "\t** Otherwise we'll use whatever value is in startPosition (if non-", "\t** null) as the start key.", "\t*/", "\tprotected void openScanController(TransactionController tc,", "\t\tDataValueDescriptor probeValue) throws StandardException", "\t\t/* If we have a probe value then we do the \"probe\" by positioning", "\t\t * the scan at the first row matching the value. The way to do", "\t\t * that is to use the value as a start key, which is what will", "\t\t * happen if we plug it into \"startPositionRow\". So in this case", "\t\t * startPositionRow functions as a \"place-holder\" for the probe", "\t\t * value. Note: if we have a probe value then we want to use it", "\t\t * as the start key AND as the stop key. In that case the value", "\t\t * of \"sameStartStopPosition\" would have been true when we created", "\t\t * this result set, and thus we've already set stopPosition equal", "\t\t * to startPosition as part of openCore(). So by putting the probe", "\t\t * value into startPositionRow, we're also putting it into", "\t\t * stopPositionRow, which is what we want.", "\t\t */", "\t\tif (probeValue != null)", "\t\t\tstartPositionRow[0] = probeValue;", "" ], "header": "@@ -446,12 +446,43 @@ class TableScanResultSet extends NoPutResultSetImpl", "removed": [] }, { "added": [ "\tprotected void reopenScanController() throws StandardException", "\t{", "\t\treopenScanController((DataValueDescriptor)null);", "\t}", "", "\t/*", "\t** Does the work of reopenScanController.", "\t**", "\t** @param probeValue If non-null then we will open the scan controller", "\t** and position it using the received probeValue as the start key.", "\t** Otherwise we'll use whatever value is in startPosition (if non-", "\t** null) as the start key.", "\t*/", "\tprotected void reopenScanController(DataValueDescriptor probeValue)" ], "header": "@@ -507,7 +538,20 @@ class TableScanResultSet extends NoPutResultSetImpl", "removed": [ "\tprivate void reopenScanController()" ] }, { "added": [ "\t\t/* If we have a probe value then we do the \"probe\" by using the", "\t\t * value as a start and stop key. See openScanController() for", "\t\t * details. Note that in this case we do *not* want to reset", "\t\t * the rowsThisScan variable because we are going to be doing", "\t\t * multiple \"probes\" for a single scan. 
Logic to detect when", "\t\t * when we've actually started a new scan (as opposed to just", "\t\t * repositioning an existing scan based on a probe value) is", "\t\t * in MultiProbeTableScanResultSet.reopenScanController(),", "\t\t * and that method will then take care of resetting the variable", "\t\t * (if needed) for probing scans.", "\t\t */", "\t\tif (probeValue != null)", "\t\t\tstartPositionRow[0] = probeValue;", "\t\telse", "\t\t\trowsThisScan = 0;", "" ], "header": "@@ -515,6 +559,22 @@ class TableScanResultSet extends NoPutResultSetImpl", "removed": [] }, { "added": [], "header": "@@ -530,8 +590,6 @@ class TableScanResultSet extends NoPutResultSetImpl", "removed": [ "", "\t\trowsThisScan = 0;" ] } ] } ]
derby-DERBY-47-0b2eeba7
DERBY-2491 (partial): Add new DistinctTest, not wired in to _Suite due to one failing test case related to DERBY-47 that merits investigation. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@523454 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/RuntimeStatisticsParser.java", "hunks": [ { "added": [ " private boolean distinctScan = false;", " private boolean eliminatedDuplicates = false;", " private boolean tableScan = false;", " private String statistics = \"\";" ], "header": "@@ -24,6 +24,10 @@ import java.sql.Connection;", "removed": [] }, { "added": [ " \tstatistics = rts;" ], "header": "@@ -34,6 +38,7 @@ public class RuntimeStatisticsParser {", "removed": [] }, { "added": [ " if (rts.indexOf(\"Distinct Scan ResultSet\") > 0) {", " \tdistinctScan = true;", " }", " ", " if (rts.indexOf(\"Table Scan ResultSet\") > 0) {", " \ttableScan = true;", " }", " ", " if (rts.indexOf(\"Eliminate duplicates = true\") > 0) {", " \teliminatedDuplicates = true;", " }" ], "header": "@@ -43,6 +48,17 @@ public class RuntimeStatisticsParser {", "removed": [] }, { "added": [ " /**", " * Return whether or not a Distinct Scan result set was used in the", " * query.", " */", " public boolean usedDistinctScan() {", " \treturn distinctScan;", " }", " ", " /**", " * Return whether or not a Table Scan result set was used in the", " * query.", " */", " public boolean usedTableScan() {", " \treturn tableScan;", " }", " ", " /**", " * Return whether or not the query involved a sort that eliminated", " * duplicates", " */", " public boolean eliminatedDuplicates() {", " \treturn eliminatedDuplicates;", " }" ], "header": "@@ -52,4 +68,27 @@ public class RuntimeStatisticsParser {", "removed": [] } ] } ]
derby-DERBY-47-1ad3b32f
DERBY-47 (partial): First incremental patch for the multi-probing ("mp") approach described in Jira comments. As mentioned in that comment, we need to be able to distinguish between "true" relational predicates and "probe predicates" so that we do not incorrectly perform certain operations on probe predicates. This first patch adds the logic to allow such distinction. In particular it: - Adds a new method, "isRelationalOpPredicate()", to Predicate.java that only returns true if the predicate is a "true" relational predicate; i.e. it will return "false" for probe predicates. - Updates several "if" statements in Predicate.java and PredicateList.java to use the new method. - Updates several utility methods in BinaryRelationalOperatorNode to distinguish "true" relational operators from ones that are created internally for probe predicates. There should be no functional changes to Derby as a result of this patch. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@512079 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/BinaryRelationalOperatorNode.java", "hunks": [ { "added": [ "\t/* If this BinRelOp was created for an IN-list \"probe predicate\"", "\t * then we keep a pointer to the original IN-list. This serves", "\t * two purposes: 1) if this field is non-null then we know that", "\t * this BinRelOp is for an IN-list probe predicate; 2) if the", "\t * optimizer chooses a plan for which the probe predicate is", "\t * not usable as a start/stop key then we'll \"revert\" the pred", "\t * back to the InListOperatorNode referenced here.", "\t */", "\tInListOperatorNode inListProbeSource = null;", "" ], "header": "@@ -75,6 +75,16 @@ public class BinaryRelationalOperatorNode", "removed": [] }, { "added": [ "\t/**", "\t * Same as init() above except takes a third argument that is", "\t * an InListOperatorNode. This version is used during IN-list", "\t * preprocessing to create a \"probe predicate\" for the IN-list.", "\t * See InListOperatorNode.preprocess() for more.", "\t */", "\tpublic void init(Object leftOperand, Object rightOperand, Object inListOp)", "\t{", "\t\tinit(leftOperand, rightOperand);", "\t\tthis.inListProbeSource = (InListOperatorNode)inListOp;", "\t}", "", "\t/**", "\t * If this rel op was created for an IN-list probe predicate then return", "\t * the underlying InListOperatorNode. Will return null if this rel", "\t * op is a \"legitimate\" relational operator (as opposed to a disguised", "\t * IN-list).", "\t */", "\tprotected InListOperatorNode getInListOp()", "\t{", "\t\treturn inListProbeSource;", "\t}", "" ], "header": "@@ -128,6 +138,29 @@ public class BinaryRelationalOperatorNode", "removed": [] }, { "added": [ "\t\t/* If this rel op is for an IN-list probe predicate then we never", "\t\t * treat it as a qualifer. The reason is that if we treat it as", "\t\t * a qualifier then we could end up generating it as a qualifier,", "\t\t * which would lead to the generation of an equality qualifier", "\t\t * of the form \"col = <val>\" (where <val> is the first value in", "\t\t * the IN-list). 
That would lead to wrong results (missing rows)", "\t\t * because that restriction is incorrect.", "\t\t */", "\t\tif (inListProbeSource != null)", "\t\t\treturn false;", "" ], "header": "@@ -738,6 +771,17 @@ public class BinaryRelationalOperatorNode", "removed": [] }, { "added": [ "\t\t/* If this rel op is for a probe predicate then we do not call", "\t\t * it a \"relational operator\"; it's actually a disguised IN-list", "\t\t * operator.", "\t\t */", "\t\treturn (inListProbeSource == null);", "\t/** @see ValueNode#isBinaryEqualsOperatorNode */", "\t\t/* If this rel op is for a probe predicate then we do not treat", "\t\t * it as an \"equals operator\"; it's actually a disguised IN-list", "\t\t * operator.", "\t\t */", "\t\treturn (inListProbeSource == null) &&", "\t\t\t(operatorType == RelationalOperator.EQUALS_RELOP);" ], "header": "@@ -1169,12 +1213,22 @@ public class BinaryRelationalOperatorNode", "removed": [ "\t\treturn true;", "\t\treturn (operatorType == RelationalOperator.EQUALS_RELOP);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/Predicate.java", "hunks": [ { "added": [ "\t\tif (!isRelationalOpPredicate())" ], "header": "@@ -175,7 +175,7 @@ public final class Predicate extends QueryTreeNode implements OptimizablePredica", "removed": [ "\t\tif (relop == null)" ] }, { "added": [ "\t\tif (!isRelationalOpPredicate())" ], "header": "@@ -190,7 +190,7 @@ public final class Predicate extends QueryTreeNode implements OptimizablePredica", "removed": [ "\t\tif (relop == null)" ] }, { "added": [ "\t\tif (isRelationalOpPredicate())", "\t\t\tretval = getRelop().equalsComparisonWithConstantExpression(optTable);" ], "header": "@@ -237,12 +237,11 @@ public final class Predicate extends QueryTreeNode implements OptimizablePredica", "removed": [ "\t\tRelationalOperator relop = getRelop();", "\t\tif (relop != null)", "\t\t\tretval = relop.equalsComparisonWithConstantExpression(optTable);" ] }, { "added": [ "\t\tif (this.isRelationalOpPredicate()) // this is not \"in\"" ], "header": "@@ -308,7 +307,7 @@ public final class Predicate extends QueryTreeNode implements OptimizablePredica", "removed": [ "\t\tif (getRelop() != null)\t\t// this is not \"in\"" ] }, { "added": [ "\t\tif (otherPred.isRelationalOpPredicate()) // other is not \"in\"" ], "header": "@@ -316,7 +315,7 @@ public final class Predicate extends QueryTreeNode implements OptimizablePredica", "removed": [ "\t\tif (otherPred.getRelop() != null)\t\t// other is not \"in\"" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/PredicateList.java", "hunks": [ { "added": [ "\t\t\t\tif (!pred.isRelationalOpPredicate())" ], "header": "@@ -534,9 +534,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\tRelationalOperator relop = pred.getRelop();", "", "\t\t\t\tif (relop == null)" ] }, { "added": [ " if ( ! pred.getRelop().isQualifier(optTable, pushPreds))" ], "header": "@@ -548,7 +546,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ " if ( ! relop.isQualifier(optTable, pushPreds))" ] }, { "added": [ "\t\t\tif (!pred.isRelationalOpPredicate() ||", "\t\t\t\t!pred.getRelop().isQualifier(optTable, false))" ], "header": "@@ -944,9 +942,9 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tRelationalOperator relop = pred.getRelop();", "\t\t\tif (relop == null || ! 
relop.isQualifier(optTable, false))" ] }, { "added": [ "\t\t\tif (!predicate.isRelationalOpPredicate())" ], "header": "@@ -2071,7 +2069,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tif (! (andNode.getLeftOperand() instanceof RelationalOperator))" ] }, { "added": [ "\t\t\tif (pred.isRelationalOpPredicate())" ], "header": "@@ -3396,7 +3394,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tif (relop != null)" ] } ] } ]
derby-DERBY-47-7777c5dd
DERBY-47 (partial): Patch that adds logic to create IN-list "probe predicates" during preprocessing, thus allowing the code changes in all previous patches for this issue to take effect. With this patch Derby will now re-write IN lists as probe predicates and, if the optimizer thinks it is best to do so, will do index "multi-probing" at execution time to avoid excessive scanning. The changes in this patch affect "preprocessing" logic as follows: 1. Replaces "A" with "B", where "A" is existing logic that creates a BETWEEN node for IN-lists containing all constants, and "B" is new logic that creates a "probe predicate" for IN-lists containing all constants *and/or* parameter nodes. The probe predicates are then used throughout optimization, modification of access paths, code generation, and execution time (as appropriate) in the manner described by previous patches for this issue. 2. Adds some additional logic to OrNode preprocessing to allow the conversion of queries like: select ... from T1 where i in (2, 3) or i in (7, 10) into queries that look like: select ... from T1 where i in (2, 3, 7, 10) This is really just an extension of the existing logic to transform a chain of OR nodes into an IN-list. 3. Adds logic to PredicateList.pushExpressionsIntoSelect() to correctly copy "probe predicates" so that the left operand (column reference) is pointing to the correct place when we do static pushing of one-sided predicates (which is what a "probe predicate" is). 4. Adds a new method to ValueNodeList that is used for checking to see if a list of IN values consists solely of constant and/or parameter nodes (there are no other expressions or column references). This patch also incorporates all of the test master updates required as a result of the new multi-probing functionality. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@518322 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/InListOperatorNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.DataTypeDescriptor;" ], "header": "@@ -31,6 +31,7 @@ import org.apache.derby.iapi.sql.dictionary.DataDictionary;", "removed": [] }, { "added": [ "\t/**", "\t * Create a shallow copy of this InListOperatorNode whose operands are", "\t * the same as this node's operands. Copy over all other necessary", "\t * state, as well.", "\t */", "\tprotected InListOperatorNode shallowCopy() throws StandardException", "\t{", "\t\tInListOperatorNode ilon =", "\t\t\t (InListOperatorNode)getNodeFactory().getNode(", "\t\t\t\tC_NodeTypes.IN_LIST_OPERATOR_NODE,", "\t\t\t\tleftOperand,", "\t\t\t\trightOperandList,", "\t\t\t\tgetContextManager());", "", "\t\tilon.copyFields(this);", "\t\tif (isOrdered)", "\t\t\tilon.markAsOrdered();", "", "\t\treturn ilon;", "\t}", "" ], "header": "@@ -88,6 +89,27 @@ public final class InListOperatorNode extends BinaryListOperatorNode", "removed": [] }, { "added": [ "\t\t\t\t rightOperandList.containsOnlyConstantAndParamNodes())", "\t\t\t/* At this point we have an IN-list made up of constant and/or", "\t\t\t * parameter values. Ex.:", "\t\t\t *", "\t\t\t * select id, name from emp where id in (34, 28, ?)", "\t\t\t *", "\t\t\t * Since the optimizer does not recognize InListOperatorNodes", "\t\t\t * as potential start/stop keys for indexes, it (the optimizer)", "\t\t\t * may estimate that the cost of using any of the indexes would", "\t\t\t * be too high. So we could--and probably would--end up doing", "\t\t\t * a table scan on the underlying base table. But if the number", "\t\t\t * of rows in the base table is significantly greater than the", "\t\t\t * number of values in the IN-list, scanning the base table can", "\t\t\t * be overkill and can lead to poor performance. And further,", "\t\t\t * choosing to use an index but then scanning the entire index", "\t\t\t * can be slow, too. DERBY-47.", "\t\t\t *", "\t\t\t * What we do, then, is create an \"IN-list probe predicate\",", "\t\t\t * which is an internally generated equality predicate with a", "\t\t\t * parameter value on the right. So for the query shown above", "\t\t\t * the probe predicate would be \"id = ?\". We then replace", "\t\t\t * this InListOperatorNode with the probe predicate during", "\t\t\t * optimization. The optimizer in turn recognizes the probe", "\t\t\t * predicate, which is disguised to look like a typical binary", "\t\t\t * equality, as a potential start/stop key for any indexes.", "\t\t\t * This start/stop key potential then factors into the estimated", "\t\t\t * cost of probing the indexes, which leads to a more reasonable", "\t\t\t * estimate and thus makes it more likely that the optimizer", "\t\t\t * will choose to use an index vs a table scan. That done, we", "\t\t\t * then use the probe predicate to perform multiple execution-", "\t\t\t * time \"probes\" on the index--instead of doing a range index", "\t\t\t * scan--which eliminates unnecessary scanning. For more see", "\t\t\t * execute/MultiProbeTableScanResultSet.java.", "\t\t\t *", "\t\t\t * With this approach we know that regardless of how large the", "\t\t\t * base table is, we'll only have to probe the index a max of", "\t\t\t * N times, where \"N\" is the size of the IN-list. 
If N is", "\t\t\t * significantly less than the number of rows in the table, or", "\t\t\t * is significantly less than the number of rows between the", "\t\t\t * min value and the max value in the IN-list, this selective", "\t\t\t * probing can save us a lot of time.", "\t\t\t *", "\t\t\t * Note: We will do fewer than N probes if there are duplicates", "\t\t\t * in the list.", "\t\t\t *", "\t\t\t * Note also that, depending on the relative size of the IN-list", "\t\t\t * verses the number of rows in the table, it may actually be", "\t\t\t * better to just do a table scan--especially if there are fewer", "\t\t\t * rows in the table than there are in the IN-list. So even though", "\t\t\t * we create a \"probe predicate\" and pass it to the optimizer, it", "\t\t\t * (the optimizer) may still choose to do a table scan. If that", "\t\t\t * happens then we'll \"revert\" the probe predicate back to its", "\t\t\t * original form (i.e. to this InListOperatorNode) during code", "\t\t\t * generation, and then we'll use it as a regular IN-list", "\t\t\t * restriction when it comes time to execute.", "\t\t\tboolean allConstants = rightOperandList.containsAllConstantNodes();", "", "\t\t\t/* If we have all constants then sort them now. This allows us to", "\t\t\t * skip the sort at execution time (we have to sort them so that", "\t\t\t * we can eliminate duplicate IN-list values). If we have one", "\t\t\t * or more parameter nodes then we do *not* sort the values here", "\t\t\t * because we do not (and cannot) know what values the parameter(s)", "\t\t\t * will have. In that case we'll sort the values at execution", "\t\t\t * time. ", "\t\t\tif (allConstants)", "\t\t\t\t/* When sorting or choosing min/max in the list, if types", "\t\t\t\t * are not an exact match, we use the left operand's type", "\t\t\t\t * as the \"judge\", assuming that they are compatible, as", "\t\t\t\t * also the case with DB2.", "\t\t\t\t */", "\t\t\t\tTypeId judgeTypeId = leftOperand.getTypeServices().getTypeId();", "\t\t\t\tDataValueDescriptor judgeODV = null; //no judge, no argument", "\t\t\t\tif (!rightOperandList.allSamePrecendence(", "\t\t\t\t\tjudgeTypeId.typePrecedence()))", "\t\t\t\t{", "\t\t\t\t\tjudgeODV = (DataValueDescriptor) judgeTypeId.getNull();", "\t\t\t\t}", " ", "\t\t\t\t// Sort the list in ascending order", "\t\t\t\trightOperandList.sortInAscendingOrder(judgeODV);", "\t\t\t\tisOrdered = true;", "", "\t\t\t\tValueNode minValue = (ValueNode)rightOperandList.elementAt(0);", "\t\t\t\tValueNode maxValue =", "\t\t\t\t\t(ValueNode)rightOperandList.elementAt(", "\t\t\t\t\t\trightOperandList.size() - 1);", "", "\t\t\t\t/* Handle the degenerate case where the min and the max", "\t\t\t\t * are the same value.", "\t\t\t\t */", "\t\t\t\tDataValueDescriptor minODV =", "\t\t\t\t\t((ConstantNode) minValue).getValue();", "\t\t\t\tDataValueDescriptor maxODV =", "\t\t\t\t\t ((ConstantNode) maxValue).getValue();", "", "\t\t\t\tif (((judgeODV == null) && (minODV.compare(maxODV) == 0)) ||", "\t\t\t\t\t((judgeODV != null)", "\t\t\t\t\t\t&& judgeODV.equals(minODV, maxODV).equals(true)))", "\t\t\t\t{", "\t\t\t\t\tBinaryComparisonOperatorNode equal = ", "\t\t\t\t\t\t(BinaryComparisonOperatorNode)getNodeFactory().getNode(", "\t\t\t\t\t\t\tC_NodeTypes.BINARY_EQUALS_OPERATOR_NODE,", "\t\t\t\t\t\t\tleftOperand, ", "\t\t\t\t\t\t\tminValue,", "\t\t\t\t\t\t\tgetContextManager());", "\t\t\t\t\t/* Set type info for the operator node */", "\t\t\t\t\tequal.bindComparisonOperator();", "\t\t\t\t\treturn equal;", "\t\t\t\t}", "\t\t\t/* Create a 
parameter node to serve as the right operand of", "\t\t\t * the probe predicate. We intentionally use a parameter node", "\t\t\t * instead of a constant node because the IN-list has more than", "\t\t\t * one value (some of which may be unknown at compile time, i.e.", "\t\t\t * if they are parameters), so we don't want an estimate based", "\t\t\t * on any single literal. Instead we want a generic estimate", "\t\t\t * of the cost to retrieve the rows matching some _unspecified_", "\t\t\t * value (namely, one of the values in the IN-list, but we", "\t\t\t * don't know which one). That's exactly what a parameter", "\t\t\t * node gives us.", "\t\t\t *", "\t\t\t * Note: If the IN-list only had a single value then we would", "\t\t\t * have taken the \"if (rightOperandList.size() == 1)\" branch", "\t\t\t * above and thus would not be here.", "\t\t\t *", "\t\t\t * We create the parameter node based on the first value in", "\t\t\t * the list. This is arbitrary and should not matter in the", "\t\t\t * big picture.", "\t\t\tValueNode srcVal = (ValueNode) rightOperandList.elementAt(0);", "\t\t\tParameterNode pNode =", "\t\t\t\t(ParameterNode) getNodeFactory().getNode(", "\t\t\t\t\tC_NodeTypes.PARAMETER_NODE,", "\t\t\t\t\tnew Integer(0),", "\t\t\t\t\tnull, // default value", "\t\t\t\t\tgetContextManager());", "", "\t\t\tDataTypeDescriptor pType = srcVal.getTypeServices();", "\t\t\tpNode.setDescriptors(new DataTypeDescriptor [] { pType });", "\t\t\tpNode.setType(pType);", "", "\t\t\t/* If we choose to use the new predicate for execution-time", "\t\t\t * probing then the right operand will function as a start-key", "\t\t\t * \"place-holder\" into which we'll store the different IN-list", "\t\t\t * values as we iterate through them. This means we have to", "\t\t\t * generate a valid value for the parameter node--i.e. for the", "\t\t\t * right side of the probe predicate--in order to have a valid", "\t\t\t * execution-time placeholder. To do that we pass the source", "\t\t\t * value from which we found the type down to the new, \"fake\"", "\t\t\t * parameter node. Then, when it comes time to generate the", "\t\t\t * parameter node, we'll just generate the source value as our", "\t\t\t * place-holder. See ParameterNode.generateExpression().", "\t\t\t *", "\t\t\t * Note: the actual value of the \"place-holder\" does not matter", "\t\t\t * because it will be clobbered by the various IN-list values", "\t\t\t * (which includes \"srcVal\" itself) as we iterate through them", "\t\t\t * during execution.", "\t\t\t */", "\t\t\tpNode.setValueToGenerate(srcVal);", "", "\t\t\t/* Finally, create the \"column = ?\" equality that serves as the", "\t\t\t * basis for the probe predicate. 
We store a reference to \"this\"", "\t\t\t * node inside the probe predicate so that, if we later decide", "\t\t\t * *not* to use the probe predicate for execution time index", "\t\t\t * probing, we can revert it back to its original form (i.e.", "\t\t\t * to \"this\").", "\t\t\tBinaryComparisonOperatorNode equal = ", "\t\t\t\t(BinaryComparisonOperatorNode) getNodeFactory().getNode(", "\t\t\t\t\tC_NodeTypes.BINARY_EQUALS_OPERATOR_NODE,", "\t\t\t\t\tleftOperand, ", "\t\t\t\t\tpNode,", "\t\t\t\t\tthis,", "\t\t\t\t\tgetContextManager());", "\t\t\t/* Set type info for the operator node */", "\t\t\tequal.bindComparisonOperator();", "\t\t\treturn equal;" ], "header": "@@ -129,95 +151,190 @@ public final class InListOperatorNode extends BinaryListOperatorNode", "removed": [ "\t\t\t\t rightOperandList.containsAllConstantNodes())", "\t\t\t/* When sorting or choosing min/max in the list, if types are not an exact", "\t\t\t * match, we use the left operand's type as the \"judge\", assuming that they", "\t\t\t * are compatible, as also the case with DB2.", "\t\t\tTypeId judgeTypeId = leftOperand.getTypeServices().getTypeId();", "\t\t\tDataValueDescriptor judgeODV = null; //no judge, no argument", "\t\t\tif (! rightOperandList.allSamePrecendence(judgeTypeId.typePrecedence()))", "\t\t\t\tjudgeODV = (DataValueDescriptor) judgeTypeId.getNull();", "", "\t\t\t//Sort the list in ascending order", "\t\t\trightOperandList.sortInAscendingOrder(judgeODV);", "\t\t\tisOrdered = true;", "", "\t\t\t/* If the leftOperand is a ColumnReference", "\t\t\t * and the IN list is all constants, then we generate", "\t\t\t * an additional BETWEEN clause of the form:", "\t\t\t *\tCRClone BETWEEN minValue and maxValue", "\t\t\t */", "\t\t\tValueNode leftClone = leftOperand.getClone();", "\t\t\tValueNode minValue = (ValueNode) rightOperandList.elementAt(0); //already sorted", "\t\t\tValueNode maxValue = (ValueNode) rightOperandList.elementAt(rightOperandList.size() - 1);", "\t\t\t/* Handle the degenerate case where ", "\t\t\t * the min and the max are the same value.", "\t\t\tDataValueDescriptor minODV =", "\t\t\t\t ((ConstantNode) minValue).getValue();", "\t\t\tDataValueDescriptor maxODV =", "\t\t\t\t ((ConstantNode) maxValue).getValue();", "\t\t\tif ((judgeODV == null && minODV.compare(maxODV) == 0) ||", "\t\t\t\t(judgeODV != null && judgeODV.equals(minODV, maxODV).equals(true)))", "\t\t\t\tBinaryComparisonOperatorNode equal = ", "\t\t\t\t\t(BinaryComparisonOperatorNode) getNodeFactory().getNode(", "\t\t\t\t\t\tC_NodeTypes.BINARY_EQUALS_OPERATOR_NODE,", "\t\t\t\t\t\tleftOperand, ", "\t\t\t\t\t\tminValue,", "\t\t\t\t\t\tgetContextManager());", "\t\t\t\t/* Set type info for the operator node */", "\t\t\t\tequal.bindComparisonOperator();", "\t\t\t\treturn equal;", "\t\t\t// Build the Between", "\t\t\tValueNodeList vnl = (ValueNodeList) getNodeFactory().getNode(", "\t\t\t\t\t\t\t\t\t\t\t\t\tC_NodeTypes.VALUE_NODE_LIST,", "\t\t\t\t\t\t\t\t\t\t\t\t\tgetContextManager());", "\t\t\tvnl.addValueNode(minValue);", "\t\t\tvnl.addValueNode(maxValue);", "", "\t\t\tBetweenOperatorNode bon = ", "\t\t\t\t(BetweenOperatorNode) getNodeFactory().getNode(", "\t\t\t\t\t\t\t\t\tC_NodeTypes.BETWEEN_OPERATOR_NODE,", "\t\t\t\t\t\t\t\t\tleftClone,", "\t\t\t\t\t\t\t\t\tvnl,", "\t\t\t\t\t\t\t\t\tgetContextManager());", "", "\t\t\t/* The transformed tree has to be normalized:", "\t\t\t *\t\t\t\tAND", "\t\t\t *\t\t\t / \\", "\t\t\t *\t\tIN LIST AND", "\t\t\t *\t\t\t\t / \\", "\t\t\t *\t\t\t\t >=\tAND", "\t\t\t *\t\t\t\t\t\t/ \\", "\t\t\t *\t\t\t\t\t 
<=\tTRUE", "", "\t\t\t/* Create the AND */", "\t\t\tAndNode newAnd;", "", "\t\t\tnewAnd = (AndNode) getNodeFactory().getNode(", "\t\t\t\t\t\t\t\t\tC_NodeTypes.AND_NODE,", "\t\t\t\t\t\t\t\t\tthis,", "\t\t\t\t\t\t\t\t\tbon.preprocess(numTables,", "\t\t\t\t\t\t\t\t\t\t\t\t outerFromList,", "\t\t\t\t\t\t\t\t\t\t\t\t outerSubqueryList,", "\t\t\t\t\t\t\t\t\t\t\t\t outerPredicateList),", "\t\t\t\t\t\t\t\t\tgetContextManager());", "\t\t\tnewAnd.postBindFixup();", "", "\t\t\t/* Mark this node as transformed so that we don't get", "\t\t\t * calculated into the selectivity mulitple times.", "\t\t\tsetTransformed();", "\t\t\t// Return new AndNode", "\t\t\treturn newAnd;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/OrNode.java", "hunks": [ { "added": [ "\t\t\t\t\t/* If the operator is an IN-list disguised as a relational", "\t\t\t\t\t * operator then we can still convert it--we'll just", "\t\t\t\t\t * combine the existing IN-list (\"left\") with the new IN-", "\t\t\t\t\t * list values. So check for that case now.", "\t\t\t\t\t */ ", "", "\t\t\t\t\tif (SanityManager.DEBUG)", "\t\t\t\t\t{", "\t\t\t\t\t\t/* At the time of writing the only way a call to", "\t\t\t\t\t\t * left.isRelationalOperator() would return false for", "\t\t\t\t\t\t * a BinaryRelationalOperatorNode was if that node", "\t\t\t\t\t\t * was for an IN-list probe predicate. That's why we", "\t\t\t\t\t\t * we can get by with the simple \"instanceof\" check", "\t\t\t\t\t\t * below. But if we're running in SANE mode, do a", "\t\t\t\t\t\t * quick check to make sure that's still valid.", "\t\t\t\t\t \t */", "\t\t\t\t\t\tBinaryRelationalOperatorNode bron = null;", "\t\t\t\t\t\tif (left instanceof BinaryRelationalOperatorNode)", "\t\t\t\t\t\t{", " \t\t\t\t\t\t\tbron = (BinaryRelationalOperatorNode)left;", "\t\t\t\t\t\t\tif (bron.getInListOp() == null)", "\t\t\t\t\t\t\t{", "\t\t\t\t\t\t\t\tSanityManager.THROWASSERT(", "\t\t\t\t\t\t\t\t\"isRelationalOperator() unexpectedly returned \"", "\t\t\t\t\t\t\t\t+ \"false for a BinaryRelationalOperatorNode.\");", "\t\t\t\t\t\t\t}", "\t\t\t\t\t\t}", "\t\t\t\t\t}", "", "\t\t\t\t\tconvert = (left instanceof BinaryRelationalOperatorNode);", "\t\t\t\t\tif (!convert)", "\t\t\t\t\t\tbreak;" ], "header": "@@ -132,8 +132,38 @@ public class OrNode extends BinaryLogicalOperatorNode", "removed": [ "\t\t\t\t\tconvert = false;", "\t\t\t\t\tbreak;" ] }, { "added": [ "\t\t\t\tBinaryRelationalOperatorNode bron = (BinaryRelationalOperatorNode)left;", "\t\t\t\tif (bron.getLeftOperand() instanceof ColumnReference)", "\t\t\t\t\tcr = (ColumnReference) bron.getLeftOperand();" ], "header": "@@ -142,11 +172,11 @@ public class OrNode extends BinaryLogicalOperatorNode", "removed": [ "\t\t\t\tBinaryRelationalOperatorNode beon = (BinaryRelationalOperatorNode)left;", "\t\t\t\tif (beon.getLeftOperand() instanceof ColumnReference)", "\t\t\t\t\tcr = (ColumnReference) beon.getLeftOperand();" ] }, { "added": [ "\t\t\t\telse if (bron.getRightOperand() instanceof ColumnReference)", "\t\t\t\t\tcr = (ColumnReference) bron.getRightOperand();" ], "header": "@@ -159,9 +189,9 @@ public class OrNode extends BinaryLogicalOperatorNode", "removed": [ "\t\t\t\telse if (beon.getRightOperand() instanceof ColumnReference)", "\t\t\t\t\tcr = (ColumnReference) beon.getRightOperand();" ] } ] } ]
derby-DERBY-47-83802657
DERBY-47 (partial): Update the logic for cost-based optimization (CBO) and modification of access paths (MoAP) to recognize IN-list "probe predicates" and to handle them appropriately. More specifically this patch adds code to do the following: - During costing, recognize when we're using a probe predicate as a start/stop key and adjust the cost accordingly. This means multiplying the estimated cost and row count for "column = ?" by the number of values in the IN-list (because we are effectively going to evaluate "column = ?" N times, where N is the size of the IN-list, and we could return one or more rows for each of the N evaluations). We also want to make sure that the resultant row count estimate is not greater than the total number of rows in the table. - When determining which predicates can be used as start/stop keys for the current conglomerate, only consider a probe predicate to be a start/stop key if it applies to the _first_ column in the conglomerate. Otherwise the probe predicate would end up being generated as a store qualifier, which means we would only get rows for which "column = ?" was true when the parameter was set to the _first_ value in the IN-list. That means we would end up with incorrect results (missing rows). - If cost-based optimization is complete and we are modifying access paths in preparation for code generation, then take any probe predicates that are *not* going to be used as start/stop keys for the chosen conglomerate and "revert" them back to the InListOperatorNodes from which they were built. Those InListOpNodes will then be generated as normal IN-list restrictions on the rows returned from store. If we did not do this reverting then the predicates would ultimately be ignored (since they are not valid qualifiers) and we would therefore end up with incorrect results (extra rows). - If we're modifying access paths and we have chosen to do multi-probing of an index then we disable bulk fetching for the target base table. Logically this is not a requirement. However, it turns out that bulk fetch can lead to poor performance when multi-probing an index if the number of probe values is high (several hundred or more) BUT that number is still just a small fraction of the total number of rows in the table. By disabling bulk fetch for multi-probing we avoid this performance slowdown. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@513839 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromBaseTable.java", "hunks": [ { "added": [ "\t\t\t/* If we have a probe predicate that is being used as a start/stop", "\t\t\t * key then ssKeySourceInList will hold the InListOperatorNode", "\t\t\t * from which the probe predicate was built.", "\t\t\t */", "\t\t\tInListOperatorNode ssKeySourceInList = null;" ], "header": "@@ -1309,6 +1309,11 @@ public class FromBaseTable extends FromTable", "removed": [] }, { "added": [ "\t\t\t\t\t/* A probe predicate is only useful if it can be used as", "\t\t\t\t\t * as a start/stop key for _first_ column in an index", "\t\t\t\t\t * (i.e. if the column position is 0). That said, we only", "\t\t\t\t\t * allow a single start/stop key per column position in the", "\t\t\t\t\t * index (see orderUsefulPredicates() in PredicateList.java).", "\t\t\t\t\t * Those two facts combined mean that we should never have", "\t\t\t\t\t * more than one probe predicate start/stop key for a given", "\t\t\t\t\t * conglomerate.", "\t\t\t\t\t */", "\t\t\t\t\tif (SanityManager.DEBUG)", "\t\t\t\t\t{", "\t\t\t\t\t\tSanityManager.ASSERT(", "\t\t\t\t\t\t\t(ssKeySourceInList == null) ||", "\t\t\t\t\t\t\t\t(((Predicate)pred).getSourceInList() == null),", "\t\t\t\t\t\t\t\"Found multiple probe predicate start/stop keys\" +", "\t\t\t\t\t\t\t\" for conglomerate '\" + cd.getConglomerateName() +", "\t\t\t\t\t\t\t\"' when at most one was expected.\");", "\t\t\t\t\t}", "", "\t\t\t\t\tssKeySourceInList = ((Predicate)pred).getSourceInList();" ], "header": "@@ -1317,6 +1322,26 @@ public class FromBaseTable extends FromTable", "removed": [] }, { "added": [ "\t\t\t/* If the start and stop key came from an IN-list \"probe predicate\"", "\t\t\t * then we need to adjust the cost estimate. The probe predicate", "\t\t\t * is of the form \"col = ?\" and we currently have the estimated", "\t\t\t * cost of probing the index a single time for \"?\". But with an", "\t\t\t * IN-list we don't just probe the index once; we're going to", "\t\t\t * probe it once for every value in the IN-list. And we are going", "\t\t\t * to potentially return an additional row (or set of rows) for", "\t\t\t * each probe. To account for this \"multi-probing\" we take the", "\t\t\t * costEstimate and multiply each of its fields by the size of", "\t\t\t * the IN-list.", "\t\t\t *", "\t\t\t * Note: If the IN-list has duplicate values then this simple", "\t\t\t * multiplication could give us an elevated cost (because we", "\t\t\t * only probe the index for each *non-duplicate* value in the", "\t\t\t * IN-list). But for now, we're saying that's okay.", "\t\t\t */", "\t\t\tif (ssKeySourceInList != null)", "\t\t\t{", "\t\t\t\tint listSize = ssKeySourceInList.getRightOperandList().size();", "\t\t\t\tdouble rc = costEstimate.rowCount() * listSize;", "\t\t\t\tdouble ssrc = costEstimate.singleScanRowCount() * listSize;", "", "\t\t\t\t/* If multiplication by listSize returns more rows than are", "\t\t\t\t * in the scan then just use the number of rows in the scan.", "\t\t\t\t */", "\t\t\t\tcostEstimate.setCost(", "\t\t\t\t\tcostEstimate.getEstimatedCost() * listSize,", "\t\t\t\t\trc > initialRowCount ? initialRowCount : rc,", "\t\t\t\t\tssrc > initialRowCount ? 
initialRowCount : ssrc);", "\t\t\t}", "" ], "header": "@@ -1514,6 +1539,37 @@ public class FromBaseTable extends FromTable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/PredicateList.java", "hunks": [ { "added": [ "\t\t\tInListOperatorNode inNode = pred.getSourceInList();", "\t\t\tboolean isIn = (inNode != null);" ], "header": "@@ -202,8 +202,8 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tboolean isIn = false;", "\t\t\tInListOperatorNode inNode = null;" ] }, { "added": [ "\t\t\tInListOperatorNode inNode = pred.getSourceInList();", "\t\t\tboolean isIn = (inNode != null);", "\t\t\tboolean isInListProbePred = isIn;" ], "header": "@@ -601,8 +601,9 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tboolean isIn = false;", "\t\t\tInListOperatorNode inNode = null;" ] }, { "added": [ "\t\t\t\t\t\telse if (isInListProbePred && (indexPosition > 0))", "\t\t\t\t\t\t{", "\t\t\t\t\t\t\t/* If the predicate is an IN-list probe predicate", "\t\t\t\t\t\t\t * then we only consider it to be useful if the", "\t\t\t\t\t\t\t * referenced column is the *first* one in the", "\t\t\t\t\t\t\t * index (i.e. if (indexPosition == 0)). Otherwise", "\t\t\t\t\t\t\t * the predicate would be treated as a qualifier", "\t\t\t\t\t\t\t * for store, which could lead to incorrect", "\t\t\t\t\t\t\t * results.", "\t\t\t\t\t\t\t */", "\t\t\t\t\t\t\tindexCol = null;", "\t\t\t\t\t\t}" ], "header": "@@ -633,6 +634,18 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [] }, { "added": [ "\t\t\t{", "\t\t\t\t/* If we're pushing predicates then this is the last time", "\t\t\t\t * we'll get here before code generation. So if we have", "\t\t\t\t * any IN-list probe predicates that are not useful, we", "\t\t\t\t * need to \"revert\" them back to their original IN-list", "\t\t\t\t * form so that they can be generated as regular IN-list", "\t\t\t\t * restrictions.", "\t\t\t\t */", "\t\t\t\tif (pushPreds && isInListProbePred)", "\t\t\t\t\tpred.revertToSourceInList();", "", "\t\t\t}" ], "header": "@@ -651,7 +664,19 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [] }, { "added": [ "\t\t\tInListOperatorNode inNode = thisPred.getSourceInList();", "\t\t\tboolean isIn = (inNode != null);", "\t\t\tboolean isInListProbePred = isIn;" ], "header": "@@ -712,8 +737,9 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tboolean isIn = false;", "\t\t\tInListOperatorNode inNode = null;" ] }, { "added": [ "\t\t\t\t{", "\t\t\t\t\t/* If we get here for an IN-list probe pred then we know", "\t\t\t\t\t * that we are *not* using the probe predicate as a", "\t\t\t\t\t * start/stop key. We also know that we're in the middle", "\t\t\t\t\t * of modifying access paths (because pushPreds is true),", "\t\t\t\t\t * which means we are preparing to generate code. 
Those", "\t\t\t\t\t * two facts together mean we have to \"revert\" the", "\t\t\t\t\t * probe predicate back to its original state so that", "\t\t\t\t\t * it can be generated as normal IN-list.", "\t\t\t\t\t */", "\t\t\t\t\tif (isInListProbePred)", "\t\t\t\t\t\tthisPred.revertToSourceInList();", "", "\t\t\t\t}" ], "header": "@@ -834,7 +860,21 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [] }, { "added": [ "\t\t\t\t *", "\t\t\t\t * Note: we don't do this if the predicate is an IN-list", "\t\t\t\t * probe predicate. In that case we want to push the ", "\t\t\t\t * predicate down to the base table for special handling.", "\t\t\t\tif (isIn && !isInListProbePred)" ], "header": "@@ -850,9 +890,13 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\tif (isIn) " ] }, { "added": [ "\t\t\t\t\t/* Although we generated dynamic start and stop keys", "\t\t\t\t\t * for \"in\", we still need this predicate for further", "\t\t\t\t\t * restriction--*unless* we're dealing with a probe", "\t\t\t\t\t * predicate, in which case the restriction is handled", "\t\t\t\t\t * via execution-time index probes (for more see", "\t\t\t\t\t * execute/TableScanResultSet.java).", "\t\t\t\t\tif (!isIn || isInListProbePred)" ], "header": "@@ -875,12 +919,15 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\t/* although we generated dynamic start and stop key for \"in\"", "\t\t\t\t\t * , we still need this predicate for further restriction", "\t\t\t\t\tif (! isIn)", "\t\t\t\t\t// restore origin" ] } ] } ]
derby-DERBY-47-9d829130
DERBY-47 (cleanup): Cleanup patch which does the following: 1 - Changes Predicate.isRelationalOpPredicate() so that it just calls the already existing method "isRelationalOperator()" on the left operand of the predicate's AND node. This ultimately comes down to a simple check for a null variable in BinaryRelationalOperatorNode, which is cheaper than the old check. 2 - Adds a new method, "isInListProbeNode()", to ValueNode. The default case returns "false", while BinaryRelationalOperatorNode returns true if it has a source IN-list associated with it. Also adds a corresponding method called "isInListProbePredicate()" to Predicate.java. This method allows for simple (and relatively cheap) checking of a predicate to see if it is an IN-list probe predicate. 3 - Modifies Predicate.getSourceInList() to return the underlying InListOperatorNode for probe predicates AND for "normal" IN-list predicates (i.e. an IN-list that could not be transformed into a "probe predicate" because it contains one or more non-parameter, non-constant values) This then allowed for some cleanup of other related code in PredicateList.java. Also adds a second version of getSourceInList() that takes a boolean argument; if true, then it will only return the source IN list for a predicate *if* that predicate is an IN-list probe predicate. 4 - Changes PredicateList.generateInListValues() to account for the fact that it only ever gets called when we know that there is a probe predicate in the list. 5 - Shortens a couple of lines in FromBaseTable that were added with earlier patches but were longer than 80 chars. Also rewrites one Sanity check in that class to avoid construction of strings when no error occurs (per recent discussions on derby-dev). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@520188 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/FromBaseTable.java", "hunks": [ { "added": [ "\t\t\t\t\t * allow a single start/stop key per column position in", "\t\t\t\t\t * the index (see PredicateList.orderUsefulPredicates()).", "\t\t\t\t\t\tif ((ssKeySourceInList != null) &&", "\t\t\t\t\t\t\t((Predicate)pred).isInListProbePredicate())", "\t\t\t\t\t\t{", "\t\t\t\t\t\t\tSanityManager.THROWASSERT(", "\t\t\t\t\t\t}", "\t\t\t\t\t/* By passing \"true\" in the next line we indicate that we", "\t\t\t\t\t * should only retrieve the underlying InListOpNode *if*", "\t\t\t\t\t * the predicate is a \"probe predicate\".", "\t\t\t\t\t */", "\t\t\t\t\tssKeySourceInList = ((Predicate)pred).getSourceInList(true);" ], "header": "@@ -1331,23 +1331,29 @@ public class FromBaseTable extends FromTable", "removed": [ "\t\t\t\t\t * allow a single start/stop key per column position in the", "\t\t\t\t\t * index (see orderUsefulPredicates() in PredicateList.java).", "\t\t\t\t\t\tSanityManager.ASSERT(", "\t\t\t\t\t\t\t(ssKeySourceInList == null) ||", "\t\t\t\t\t\t\t\t(((Predicate)pred).getSourceInList() == null),", "\t\t\t\t\tssKeySourceInList = ((Predicate)pred).getSourceInList();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/PredicateList.java", "hunks": [ { "added": [ "", "\t\t\t/* InListOperatorNodes, while not relational operators, may still", "\t\t\t * be useful. There are two cases: a) we transformed the IN-list", "\t\t\t * into a probe predicate of the form \"col = ?\", which can then be", "\t\t\t * optimized/generated as a start/stop key and used for \"multi-", "\t\t\t * probing\" at execution; or b) we did *not* transform the IN-list,", "\t\t\t * in which case we'll generate _dynamic_ start and stop keys in", "\t\t\t * order to improve scan performance (beetle 3858). In either case", "\t\t\t * the IN-list may still prove \"useful\".", "\t\t\t */", "\t\t\t/* If it's not a relational operator and it's not \"in\", then it's", "\t\t\t * not useful.", "\t\t\t */", "\t\t\tif (!isIn && (relop == null))", "\t\t\t\tcontinue;" ], "header": "@@ -195,29 +195,25 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "", "\t\t\t/*", "\t\t\t** Skip over it if it's not a relational operator (this includes", "\t\t\t** BinaryComparisonOperators and IsNullNodes.", "\t\t\t*/", "\t\t\tif (relop == null)", "\t\t\t{", "\t\t\t\t/* if it's \"in\" operator, we generate dynamic start and stop key", "\t\t\t\t * to improve index scan performance, beetle 3858.", "\t\t\t\t */", "\t\t\t\tif (pred.getAndNode().getLeftOperand() instanceof InListOperatorNode &&", "\t\t\t\t\t! ((InListOperatorNode)pred.getAndNode().getLeftOperand()).getTransformed())", "\t\t\t\t{", "\t\t\t\t\tisIn = true;", "\t\t\t\t\tinNode = (InListOperatorNode) pred.getAndNode().getLeftOperand();", "\t\t\t\t}", "\t\t\t\telse", "\t\t\t\t\tcontinue;", "\t\t\t}" ] }, { "added": [ "\t\t\t\t\tif (pred.isInListProbePredicate())" ], "header": "@@ -564,7 +560,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\tif (pred.getSourceInList() != null)" ] }, { "added": [ "\t\t\t/* InListOperatorNodes, while not relational operators, may still", "\t\t\t * be useful. 
There are two cases: a) we transformed the IN-list", "\t\t\t * into a probe predicate of the form \"col = ?\", which can then be", "\t\t\t * optimized/generated as a start/stop key and used for \"multi-", "\t\t\t * probing\" at execution; or b) we did *not* transform the IN-list,", "\t\t\t * in which case we'll generate _dynamic_ start and stop keys in", "\t\t\t * order to improve scan performance (beetle 3858). In either case", "\t\t\t * the IN-list may still prove \"useful\".", "\t\t\t/* If it's not an \"in\" operator and either a) it's not a relational", "\t\t\t * operator or b) it's not a qualifier, then it's not useful for", "\t\t\t * limiting the scan, so skip it.", "\t\t\t */", "\t\t\tif (!isIn &&", "\t\t\t\t((relop == null) || !relop.isQualifier(optTable, pushPreds)))", "\t\t\t}" ], "header": "@@ -609,34 +605,29 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "", "\t\t\t/*", "\t\t\t** Skip over it if it's not a relational operator (this includes", "\t\t\t** BinaryComparisonOperators and IsNullNodes.", "\t\t\t*/", "\t\t\t/* if it's \"in\" operator, we generate dynamic start and stop key", "\t\t\t * to improve index scan performance, beetle 3858.", "\t\t\tboolean isInListProbePred = isIn;", "\t\t\tif (relop == null)", "\t\t\t\tif (pred.getAndNode().getLeftOperand() instanceof InListOperatorNode &&", "\t\t\t\t\t! ((InListOperatorNode)pred.getAndNode().getLeftOperand()).getTransformed())", "\t\t\t\t{", "\t\t\t\t\tisIn = true;", "\t\t\t\t\tinNode = (InListOperatorNode) pred.getAndNode().getLeftOperand();", "\t\t\t\t}", "\t\t\t\telse", "\t\t\t\t\tcontinue;", "\t\t\t}", "", "\t\t\tif ( !isIn && ! relop.isQualifier(optTable, pushPreds))" ] }, { "added": [ "\t\t\t\t\t\telse if (pred.isInListProbePredicate()", "\t\t\t\t\t\t\t\t&& (indexPosition > 0))" ], "header": "@@ -652,7 +643,8 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\t\telse if (isInListProbePred && (indexPosition > 0))" ] }, { "added": [ "\t\t\tboolean isIn = (thisPred.getSourceInList() != null);", "\t\t\tif (relop != null)" ], "header": "@@ -753,20 +745,10 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tInListOperatorNode inNode = thisPred.getSourceInList();", "\t\t\tboolean isIn = (inNode != null);", "\t\t\tboolean isInListProbePred = isIn;", "\t\t\tif (relop == null)", "\t\t\t{", "\t\t\t\tisIn = true;", "\t\t\t\tinNode = (InListOperatorNode) ", " thisPred.getAndNode().getLeftOperand();", "\t\t\t}", "\t\t\telse", " {", " }" ] }, { "added": [ "\t\t\t\tif (isIn && !thisPred.isInListProbePredicate())" ], "header": "@@ -911,7 +893,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\tif (isIn && !isInListProbePred)" ] }, { "added": [ "\t\t\t\t\tif (!isIn || thisPred.isInListProbePredicate())" ], "header": "@@ -941,7 +923,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\tif (!isIn || isInListProbePred)" ] }, { "added": [ "\t\t\t\t\tinNode = opNode.getInListOp();", "\t\t\t\t\tif (inNode != null)", "\t\t\t\t\t\tinNode = inNode.shallowCopy();", "\t\t\t\t\t\tinNode.setLeftOperand(newCRNode);" ], "header": "@@ -1506,11 +1488,11 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\tInListOperatorNode ilon = opNode.getInListOp();", "\t\t\t\t\tif (ilon != null)", "\t\t\t\t\t\tilon = 
ilon.shallowCopy();", "\t\t\t\t\t\tilon.setLeftOperand(newCRNode);" ] }, { "added": [ "\t\t\t\t\t\t\t\t\t\tinNode," ], "header": "@@ -1518,7 +1500,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\t\t\t\t\t\tilon," ] }, { "added": [ "\t\tfor (int index = size() - 1; index >= 0; index--)", "\t\t\tif (!pred.isInListProbePredicate())" ], "header": "@@ -2854,15 +2836,12 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\tint size = size();", "\t\tInListOperatorNode ilon = null;", "\t\tfor (int index = size - 1; index >= 0; index--)", "\t\t\tilon = pred.getSourceInList();", "\t\t\tif (ilon == null)" ] }, { "added": [ "\t\t\t\t\tif (((Predicate)elementAt(i)).isInListProbePredicate())" ], "header": "@@ -2882,8 +2861,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\tpred = (Predicate)elementAt(i);", "\t\t\t\t\tif (pred.getSourceInList() != null)" ] }, { "added": [ "\t\t\tInListOperatorNode ilon = pred.getSourceInList();", "\t\t\treturn;", "", "\t\t/* If we get here then we didn't find any probe predicates. But", "\t\t * if that's true then we shouldn't have made it to this method", "\t\t * to begin with.", "\t\t */", "\t\tif (SanityManager.DEBUG)", "\t\t\tSanityManager.THROWASSERT(\"Attempted to generate IN-list values\" +", "\t\t\t\t\"for multi-probing but no probe predicates were found.\");" ], "header": "@@ -2892,18 +2870,20 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\tbreak;", "\t\t}", "", "\t\tif (ilon != null)", "\t\t{", "\t\telse", "\t\t\tmb.pushNull(ClassName.DataValueDescriptor + \"[]\");", "\t\t\tmb.push(false);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ValueNode.java", "hunks": [ { "added": [ "\t/**", "\t * Returns true if this value node is an operator created", "\t * for optimized performance of an IN list.", "\t *", "\t * Or more specifically, returns true if this value node is", "\t * an equals operator of the form \"col = ?\" that we generated", "\t * during preprocessing to allow index multi-probing.", "\t */", "\tpublic boolean isInListProbeNode()", "\t{", "\t\treturn false;", "\t}", "" ], "header": "@@ -1281,6 +1281,19 @@ public abstract class ValueNode extends QueryTreeNode", "removed": [] } ] } ]
derby-DERBY-47-cbdc90cf
DERBY-47 (partial): Code generation patch that does the following: 1 - Moves the code for generating a list of IN values into a new method, InListOperatorNode.generateListAsArray()". The new method is then called from two places: A. InListOperatorNode.generateExpression(): the "normal" code-path for generating IN-list bytecode (prior to DERBY-47 changes). B. PredicateList.generateInListValues(): new method for generating the IN-list values that will serve as the execution-time index "probe" values. This method also generates a boolean to indicate whether or not the values are already sorted (i.e. if we sorted them at compile time, which means they all must have been constants). 2 - Adds code to ParameterNode that allows generation of a "place-holder" value (instead of the ParameterNode itself) for probe-predicates. This is required because a probe predicate has the form "column = ?" where the right operand is an internally generated parameter node that does not actually correspond to a user parameter. Since that parameter node is "fake" we can't really generate it; instead we need to be able to generate a legitimate ValueNode--either a constant node or a "real" parameter node--to serve as the place-holder. 3 - Updates the generateExpression() method of BinaryOperatorNode to account for situations where the optimizer chooses a plan for which a probe pred is *not* a useful start/stop key and thus is not being used for execution-time index probing. In this case we simply "revert" the probe predicate back to the InListOperatorNode from which it was created. Or put another way, we "give up" on index multi-probing and simply generate the original IN-list as a regular restriction. This patch also removes the now unnecessary "revertToSourceInList()" calls from PredicateList.java. 4 - Adds logic to NestedLoopJoinStrategy to generate a new type of result set, MultiProbeTableScanResultSet, for probing an index at execution time. The new result set does not yet exist (incremental development) but the code to generate such a result set is added as part of this patch. 5 - Adds a new method, "getMultiProbeTableScanResultSet()", to the ResultSetFactory interface. Also adds a corresponding stub method to GenericResultSetFactory. The latter is just a dummy method and will be filled in with the appropriate code as part of a subsequent patch. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@515795 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/compile/JoinStrategy.java", "hunks": [ { "added": [ "\t * @param bulkFetch True means bulk fetch is being done on the inner table", "\t * @param multiprobe True means we are probing the inner table for rows", "\t * matching a specified list of values.", "\tString resultSetMethodName(boolean bulkFetch, boolean multiprobe);" ], "header": "@@ -179,10 +179,11 @@ public interface JoinStrategy {", "removed": [ "\t * @param bulkFetch\t\tTrue means bulk fetch is being done on the inner", "\t *\t\t\t\t\t\ttable", "\tString resultSetMethodName(boolean bulkFetch);" ] }, { "added": [ "\t * @param genInListVals Whether or not we are going to generate IN-list", "\t * values with which to probe the inner table." ], "header": "@@ -215,6 +216,8 @@ public interface JoinStrategy {", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/sql/execute/ResultSetFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.DataValueDescriptor;" ], "header": "@@ -30,6 +30,7 @@ import org.apache.derby.iapi.sql.Activation;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/FromBaseTable.java", "hunks": [ { "added": [ "\t/* Whether or not we are going to do execution time \"multi-probing\"", "\t * on the table scan for this FromBaseTable.", "\t */", "\tboolean\t\t\tmultiProbing = false;", "" ], "header": "@@ -143,6 +143,11 @@ public class FromBaseTable extends FromTable", "removed": [] }, { "added": [ "\t\t\t\tmultiProbing = true;" ], "header": "@@ -2732,6 +2737,7 @@ public class FromBaseTable extends FromTable", "removed": [] }, { "added": [ "\t\t\ttrulyTheBestJoinStrategy.resultSetMethodName(", "\t\t\t\t(bulkFetch != UNSET), multiProbing)," ], "header": "@@ -3121,7 +3127,8 @@ public class FromBaseTable extends FromTable", "removed": [ "\t\t\ttrulyTheBestJoinStrategy.resultSetMethodName(bulkFetch != UNSET)," ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/HashJoinStrategy.java", "hunks": [ { "added": [ "\tpublic String resultSetMethodName(boolean bulkFetch, boolean multiprobe) {" ], "header": "@@ -300,7 +300,7 @@ public class HashJoinStrategy extends BaseJoinStrategy {", "removed": [ "\tpublic String resultSetMethodName(boolean bulkFetch) {" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/InListOperatorNode.java", "hunks": [ { "added": [ "\t\tLocalField arrayField = generateListAsArray(acb, mb);", "", "\t\t/*", "\t\t** Call the method for this operator.", "\t\t*/", "\t\t/*", "\t\t** Generate (field = <left expression>). This assignment is", "\t\t** used as the receiver of the method call for this operator,", "\t\t** and the field is used as the left operand:", "\t\t**", "\t\t**\t(field = <left expression>).method(field, <right expression>...)", "\t\t*/", "", "\t\t//LocalField receiverField =", "\t\t//\tacb.newFieldDeclaration(Modifier.PRIVATE, receiverType);", "", "\t\tleftOperand.generateExpression(acb, mb);", "\t\tmb.dup();", "\t\t//mb.putField(receiverField); // instance for method call", "\t\t/*mb.getField(receiverField);*/ mb.upCast(leftInterfaceType); // first arg", "\t\tmb.getField(arrayField); // second arg", "\t\tmb.push(isOrdered); // third arg", "\t\tmb.callMethod(VMOpcode.INVOKEINTERFACE, receiverType, methodName, resultTypeName, 3);", "\t}", "", "\t/**", "\t * Generate the code to create an array of DataValueDescriptors that", "\t * will hold the IN-list values at execution time. The array gets", "\t * created in the constructor. 
All constant elements in the array", "\t * are initialized in the constructor. All non-constant elements,", "\t * if any, are initialized each time the IN list is evaluated.", "\t *", "\t * @param acb The ExpressionClassBuilder for the class we're generating", "\t * @param mb The MethodBuilder the expression will go into", "\t */", "\tprotected LocalField generateListAsArray(ExpressionClassBuilder acb,", "\t\tMethodBuilder mb) throws StandardException", "\t{", "\t\tint listSize = rightOperandList.size();", "\t\tLocalField arrayField = acb.newFieldDeclaration(", "\t\t\tModifier.PRIVATE, ClassName.DataValueDescriptor + \"[]\");" ], "header": "@@ -397,14 +397,48 @@ public final class InListOperatorNode extends BinaryListOperatorNode", "removed": [ "\t\tLocalField arrayField =", "\t\t\tacb.newFieldDeclaration(Modifier.PRIVATE, rightInterfaceType);", "\t\t/* The array gets created in the constructor.", "\t\t * All constant elements in the array are initialized", "\t\t * in the constructor. All non-constant elements, if any,", "\t\t * are initialized each time the IN list is evaluated.", "\t\t */" ] }, { "added": [ "\t\t\tsetArrayMethod.upCast(ClassName.DataValueDescriptor); // second arg" ], "header": "@@ -455,7 +489,7 @@ public final class InListOperatorNode extends BinaryListOperatorNode", "removed": [ "\t\t\tsetArrayMethod.upCast(receiverType); // second arg" ] }, { "added": [ "\t\treturn arrayField;" ], "header": "@@ -472,29 +506,7 @@ public final class InListOperatorNode extends BinaryListOperatorNode", "removed": [ "\t\t/*", "\t\t** Call the method for this operator.", "\t\t*/", "\t\t/*", "\t\t** Generate (field = <left expression>). This assignment is", "\t\t** used as the receiver of the method call for this operator,", "\t\t** and the field is used as the left operand:", "\t\t**", "\t\t**\t(field = <left expression>).method(field, <right expression>...)", "\t\t*/", "", "\t\t//LocalField receiverField =", "\t\t//\tacb.newFieldDeclaration(Modifier.PRIVATE, receiverType);", "", "\t\tleftOperand.generateExpression(acb, mb);", "\t\tmb.dup();", "\t\t//mb.putField(receiverField); // instance for method call", "\t\t/*mb.getField(receiverField);*/ mb.upCast(leftInterfaceType); // first arg", "\t\tmb.getField(arrayField); // second arg", "\t\tmb.push(isOrdered); // third arg", "\t\tmb.callMethod(VMOpcode.INVOKEINTERFACE, receiverType, methodName, resultTypeName, 3);", "", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/NestedLoopJoinStrategy.java", "hunks": [ { "added": [ "\tpublic String resultSetMethodName(boolean bulkFetch, boolean multiprobe) {", "\t\telse if (multiprobe)", "\t\t\treturn \"getMultiProbeTableScanResultSet\";" ], "header": "@@ -171,9 +171,11 @@ public class NestedLoopJoinStrategy extends BaseJoinStrategy {", "removed": [ "\tpublic String resultSetMethodName(boolean bulkFetch) {" ] }, { "added": [ "\t\t\t\t\t\t\tint maxMemoryPerTable,", "\t\t\t\t\t\t\tboolean genInListVals" ], "header": "@@ -207,7 +209,8 @@ public class NestedLoopJoinStrategy extends BaseJoinStrategy {", "removed": [ " int maxMemoryPerTable" ] }, { "added": [ "\t\t/* If we're going to generate a list of IN-values for index probing", "\t\t * at execution time then we push TableScanResultSet arguments plus", "\t\t * two additional arguments: 1) the list of IN-list values, and 2)", "\t\t * a boolean indicating whether or not the IN-list values are already", "\t\t * sorted.", "\t\t */", "\t\tif (genInListVals)", "\t\t{", "\t\t\tnumArgs = 26;", "\t\t}", "\t\telse if (bulkFetch > 1)" ], "header": 
"@@ -223,7 +226,17 @@ public class NestedLoopJoinStrategy extends BaseJoinStrategy {", "removed": [ "\t\tif (bulkFetch > 1)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/ParameterNode.java", "hunks": [ { "added": [ "\t/**", "\t * If this parameter node was created as part of a \"probe predicate\"", "\t * for an InListOperatorNode then it does not actually correspond to", "\t * a specific value--we just created it as a start-key place-holder", "\t * for IN-list values at execution time. In order to serve that", "\t * purpose we need to generate some value that can be used as the", "\t * place-holder. Since this parameter node is \"fake\" and does not", "\t * correspond to an actual parameter, we can't really generate it;", "\t * so the following field holds some legitimate ValueNode--either a", "\t * constant node or a \"real\" parameter node--that we can generate to", "\t * serve as the place-holder.", "\t */", "\tprivate ValueNode valToGenerate;", "" ], "header": "@@ -85,6 +85,20 @@ public class ParameterNode extends ValueNode", "removed": [] }, { "added": [ "\t\t/* If we were given a specific ValueNode to generate then", "\t\t * just use that.", "\t\t */", "\t\tif (valToGenerate != null)", "\t\t{", "\t\t\tvalToGenerate.generateExpression(acb, mb);", "\t\t\treturn;", "\t\t}", "" ], "header": "@@ -322,6 +336,15 @@ public class ParameterNode extends ValueNode", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/PredicateList.java", "hunks": [ { "added": [ " /* NOT an OR or AND, so go on to next predicate.", " *", " * Note: if \"pred\" (or any predicates in the tree", " * beneath \"pred\") is an IN-list probe predicate", " * then we'll \"revert\" it to its original form", " * (i.e. to the source InListOperatorNode from", " * which it originated) as part of code generation.", " * See generateExpression() in BinaryOperatorNode.", " */" ], "header": "@@ -539,7 +539,15 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ " // NOT an OR or AND, so go on to next predicate." ] }, { "added": [ "\t\t\t\tif (SanityManager.DEBUG)", "\t\t\t\t{", "\t\t\t\t\tif (pred.getSourceInList() != null)", "\t\t\t\t\t{", "\t\t\t\t\t\tSanityManager.THROWASSERT(\"Found an IN-list probe \" +", "\t\t\t\t\t\t\t\"predicate (\" + pred.binaryRelOpColRefsToString() +", "\t\t\t\t\t\t\t\") that was marked as a qualifier, which should \" +", "\t\t\t\t\t\t\t\"not happen.\");", "\t\t\t\t\t}", "\t\t\t\t}", "" ], "header": "@@ -554,6 +562,17 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [] }, { "added": [ "\t\t\t\t * any IN-list probe predicates that are not useful, we'll", "\t\t\t\t * restrictions. That \"revert\" operation happens in", "\t\t\t\t * the generateExpression() method of BinaryOperatorNode." ], "header": "@@ -666,14 +685,12 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t * any IN-list probe predicates that are not useful, we", "\t\t\t\t * restrictions.", "\t\t\t\tif (pushPreds && isInListProbePred)", "\t\t\t\t\tpred.revertToSourceInList();", "" ] }, { "added": [ "\t\t\t\t\t * it can be generated as normal IN-list. That \"revert\"", "\t\t\t\t\t * operation happens from within the generateExpression()", "\t\t\t\t\t * method of BinaryOperatorNode.java." 
], "header": "@@ -867,11 +884,10 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\t * it can be generated as normal IN-list.", "\t\t\t\t\tif (isInListProbePred)", "\t\t\t\t\t\tthisPred.revertToSourceInList();", "" ] }, { "added": [ "\t\t\t\t\t * execute/MultiProbeTableScanResultSet.java)." ], "header": "@@ -923,7 +939,7 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [ "\t\t\t\t\t * execute/TableScanResultSet.java)." ] }, { "added": [ "\t/**", "\t * If there is an IN-list probe predicate in this list then generate", "\t * the corresponding IN-list values as a DataValueDescriptor array,", "\t * to be used for probing at execution time. Also generate a boolean", "\t * value indicating whether or not the values are already in sorted", "\t * order.", "\t *", "\t * Assumption is that by the time we get here there is at most one", "\t * IN-list probe predicate in this list.", "\t *", "\t * @param acb The ActivationClassBuilder for the class we're building", "\t * @param exprFun The MethodBuilder for the method we're building", "\t */", "\tprotected void generateInListValues(ExpressionClassBuilder acb,", "\t\tMethodBuilder mb) throws StandardException", "\t{", "\t\tint size = size();", "\t\tInListOperatorNode ilon = null;", "\t\tfor (int index = size - 1; index >= 0; index--)", "\t\t{", "\t\t\tPredicate pred = (Predicate)elementAt(index);", "\t\t\tilon = pred.getSourceInList();", "", "\t\t\t// Don't do anything if it's not an IN-list probe predicate.", "\t\t\tif (ilon == null)", "\t\t\t\tcontinue;", "", "\t\t\t/* We're going to generate the relevant code for the probe", "\t\t\t * predicate below, so we no longer need it to be in the", "\t\t\t * list. Remove it now.", "\t\t\t */", "\t\t\tremoveOptPredicate(pred);", "", "\t\t\t/* This list is a store restriction list for a specific base", "\t\t\t * table, and we can only have one probe predicate per base", "\t\t\t * table (any others, if any, will be \"reverted\" back to", "\t\t\t * their original InListOperatorNodes and generated as", "\t\t\t * qualifiers). 
So make sure there are no other probe preds", "\t\t\t * in this list.", "\t\t\t */", "\t\t\tif (SanityManager.DEBUG)", "\t\t\t{", "\t\t\t\tfor (int i = 0; i < index; i++)", "\t\t\t\t{", "\t\t\t\t\tpred = (Predicate)elementAt(i);", "\t\t\t\t\tif (pred.getSourceInList() != null)", "\t\t\t\t\t{", "\t\t\t\t\t\tSanityManager.THROWASSERT(\"Found multiple probe \" +", "\t\t\t\t\t\t\t\"predicates for IN-list when only one was \" +", "\t\t\t\t\t\t\t\"expected.\");", "\t\t\t\t\t}", "\t\t\t\t}", "\t\t\t}", "", "\t\t\tbreak;", "\t\t}", "", "\t\tif (ilon != null)", "\t\t{", "\t\t\tmb.getField(ilon.generateListAsArray(acb, mb));", "\t\t\tmb.push(ilon.isOrdered());", "\t\t}", "\t\telse", "\t\t{", "\t\t\tmb.pushNull(ClassName.DataValueDescriptor + \"[]\");", "\t\t\tmb.push(false);", "\t\t}", "\t}", "" ], "header": "@@ -2807,6 +2823,75 @@ public class PredicateList extends QueryTreeNodeVector implements OptimizablePre", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/GenericResultSetFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.DataValueDescriptor;", "" ], "header": "@@ -51,6 +51,8 @@ import org.apache.derby.iapi.store.access.StaticCompiledOpenConglomInfo;", "removed": [] }, { "added": [ "\t/**", "\t\tMulti-probing scan that probes an index for specific values contained", "\t\tin the received probe list.", "", "\t\tAll index rows for which the first column equals probeVals[0] will", "\t\tbe returned, followed by all rows for which the first column equals", "\t\tprobeVals[1], and so on. Assumption is that we only get here if", "\t\tprobeVals has at least one value.", "", "\t\t@see ResultSetFactory#getMultiProbeTableScanResultSet", "\t\t@exception StandardException thrown on error", "\t */", "\tpublic NoPutResultSet getMultiProbeTableScanResultSet(", " \t\t\tActivation activation,", "\t\t\t\t\t\t\t\t\tlong conglomId,", "\t\t\t\t\t\t\t\t\tint scociItem,", "\t\t\t\t\t\t\t\t\tGeneratedMethod resultRowAllocator,", "\t\t\t\t\t\t\t\t\tint resultSetNumber,", "\t\t\t\t\t\t\t\t\tGeneratedMethod startKeyGetter,", "\t\t\t\t\t\t\t\t\tint startSearchOperator,", "\t\t\t\t\t\t\t\t\tGeneratedMethod stopKeyGetter,", "\t\t\t\t\t\t\t\t\tint stopSearchOperator,", "\t\t\t\t\t\t\t\t\tboolean sameStartStopPosition,", "\t\t\t\t\t\t\t\t\tQualifier[][] qualifiers,", "\t\t\t\t\t\t\t\t\tDataValueDescriptor [] probeVals,", "\t\t\t\t\t\t\t\t\tboolean probeValsAreSorted,", "\t\t\t\t\t\t\t\t\tString tableName,", "\t\t\t\t\t\t\t\t\tString userSuppliedOptimizerOverrides,", "\t\t\t\t\t\t\t\t\tString indexName,", "\t\t\t\t\t\t\t\t\tboolean isConstraint,", "\t\t\t\t\t\t\t\t\tboolean forUpdate,", "\t\t\t\t\t\t\t\t\tint colRefItem,", "\t\t\t\t\t\t\t\t\tint indexColItem,", "\t\t\t\t\t\t\t\t\tint lockMode,", "\t\t\t\t\t\t\t\t\tboolean tableLocked,", "\t\t\t\t\t\t\t\t\tint isolationLevel,", "\t\t\t\t\t\t\t\t\tboolean oneRowScan,", "\t\t\t\t\t\t\t\t\tdouble optimizerEstimatedRowCount,", "\t\t\t\t\t\t\t\t\tdouble optimizerEstimatedCost)", "\t\t\tthrows StandardException", "\t{", "\t\t/* Incremental development: For now we should never actually get to", "\t\t * this method, so just return null. When the appropriate execution", "\t\t * logic is in place (i.e. 
MultiProbeTableScanResultSet exists) then", "\t\t * we will add a call to create an instance of the result set here.", "\t\t */", "\t\tif (SanityManager.DEBUG)", "\t\t{", "\t\t\tSanityManager.THROWASSERT(\"Tried to instantiate \" +", "\t\t\t\t\"MultiProbeTableScanResultSet, which does not \" +", "\t\t\t\t\"yet exist (DERBY-47 incremental development).\");", "\t\t}", "", "\t\treturn (NoPutResultSetImpl)null;", "\t}", "" ], "header": "@@ -725,6 +727,62 @@ public class GenericResultSetFactory implements ResultSetFactory", "removed": [] } ] } ]
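Item 2 of the commit message above, together with the ParameterNode hunk, describes generating a legitimate place-holder value for the "fake" parameter of a probe predicate. A stripped-down sketch of that pattern, with strings standing in for Derby's generated bytecode:

    // Sketch of the place-holder idea: a parameter node created only to act as
    // a probe-predicate start key cannot be generated like a user parameter,
    // so a real value is generated in its place. setValueToGenerate mirrors
    // the patch; everything else is simplified.
    class ParameterNodeSketch {
        private Object valToGenerate;     // the value to emit instead of this node

        void setValueToGenerate(Object val) {
            valToGenerate = val;
        }

        String generateExpression() {
            if (valToGenerate != null) {
                return String.valueOf(valToGenerate); // fake parameter: emit the place-holder
            }
            return "getParameterValue(i)";            // normal user parameter (simplified)
        }
    }

The actual value emitted does not matter at run time, since it is overwritten by each IN-list value during probing.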
derby-DERBY-470-e1b8b606
DERBY-470 - Exception when using LOCALIZEDDISPLAY with JSR169 1. Check for the BigDecimal class in the JVM by doing a Class.forName. 2. Call getNumberAsString for NUMERIC and DECIMAL types only if the BigDecimal class is available. Otherwise, return rs.getString. Contributed by Deepa Remesh git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@239718 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/tools/org/apache/derby/iapi/tools/i18n/LocalizedResource.java", "hunks": [ { "added": [ "\tprivate static boolean HAVE_BIG_DECIMAL;", "\t", "\t{", "\t\tboolean haveBigDecimal;", "\t\ttry {", "\t\t\tClass.forName(\"java.math.BigDecimal\");", "\t\t\thaveBigDecimal = true;", "\t\t} catch (Throwable t) {", "\t\t\thaveBigDecimal = false;", "\t\t}", "\t\tHAVE_BIG_DECIMAL = haveBigDecimal;", "\t}", "\t" ], "header": "@@ -44,6 +44,19 @@ import java.sql.Types;", "removed": [] }, { "added": [ "\t\t\telse if (HAVE_BIG_DECIMAL && (type == Types.NUMERIC || type == Types.DECIMAL)) {" ], "header": "@@ -305,7 +318,7 @@ public final class LocalizedResource implements java.security.PrivilegedAction", "removed": [ "\t\t\telse if (type == Types.NUMERIC || type == Types.DECIMAL ) {" ] } ] } ]
derby-DERBY-4700-43220cc1
DERBY-4700: Add method to obtain a bogus port in TestConfiguration Added the method getBogusPort to obtain a port where no Derby network server is supposed to be running (i.e. when doing negative connect tests). Patch file: derby-4700-1b.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@963705 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [ "public final class TestConfiguration {" ], "header": "@@ -56,7 +56,7 @@ import junit.framework.TestSuite;", "removed": [ "public class TestConfiguration {" ] }, { "added": [ " this.bogusPort = getNextAvailablePort();", " println(\"basePort=\" + basePort + \", jmxPort=\" + jmxPort +", " \", bogusPort=\" + bogusPort);" ], "header": "@@ -1018,6 +1018,9 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " this.bogusPort = copy.bogusPort;" ], "header": "@@ -1038,6 +1041,7 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " this.bogusPort = copy.bogusPort;", " if (bogusPort == port) {", " throw new IllegalStateException(", " \"port cannot equal bogusPort: \" + bogusPort);", " }" ], "header": "@@ -1061,6 +1065,11 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " this.bogusPort = copy.bogusPort;", " if (bogusPort == port) {", " throw new IllegalStateException(", " \"port cannot equal bogusPort: \" + bogusPort);", " }" ], "header": "@@ -1084,6 +1093,11 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " this.bogusPort = copy.bogusPort;" ], "header": "@@ -1116,6 +1130,7 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " this.bogusPort = copy.bogusPort;" ], "header": "@@ -1179,6 +1194,7 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " bogusPort = getNextAvailablePort();", " println(\"basePort=\" + basePort + \", jmxPort=\" + jmxPort +", " \", bogusPort=\" + bogusPort);" ], "header": "@@ -1208,6 +1224,9 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " /**", " * Returns a port number where no Derby network servers are supposed to", " * be running.", " *", " * @return A port number where no Derby network servers are started.", " */", " public int getBogusPort() {", " return bogusPort;", " }", "" ], "header": "@@ -1415,6 +1434,16 @@ public class TestConfiguration {", "removed": [] }, { "added": [ "", " /**", " * Private method printing debug information to standard out if debugging", " * is enabled.", " * <p>", " * <em>Note:</em> This method may direct output to a different location", " * than the println method in <tt>BaseJDBCTestCase</tt>.", " */", " private void println(CharSequence msg) {", " if (isVerbose) {", " System.out.println(\"DEBUG: {TC@\" + hashCode() + \"} \" + msg);", " }", " }", "" ], "header": "@@ -1597,6 +1626,20 @@ public class TestConfiguration {", "removed": [] }, { "added": [ " private final int bogusPort;" ], "header": "@@ -1702,6 +1745,7 @@ public class TestConfiguration {", "removed": [] } ] } ]
derby-DERBY-4700-b765481f
DERBY-4700: Add method to obtain a bogus port in TestConfiguration Made bogus port static so that it can be shared across TestConfiguration instances. Increased the maximum port count to 20 (we're currently using 11 in suites.All). Patch file: derby-4700-2a-static_bogus.diff (changed 11 to 20 for MAX_PORTS_USED) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@964115 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [ " private final static int MAX_PORTS_USED = 20;", " private static final int bogusPort;" ], "header": "@@ -75,13 +75,14 @@ public final class TestConfiguration {", "removed": [ " private final static int MAX_PORTS_USED = 10;" ] }, { "added": [ " bogusPort = ++lastAssignedPort;", " private static int assignedPortCount = 2;" ], "header": "@@ -90,8 +91,9 @@ public final class TestConfiguration {", "removed": [ " private static int assignedPortCount = 1;" ] }, { "added": [ " println(\"basePort=\" + basePort + \", bogusPort=\" + bogusPort +", " \", jmxPort=\" + jmxPort);" ], "header": "@@ -1018,9 +1020,8 @@ public final class TestConfiguration {", "removed": [ " this.bogusPort = getNextAvailablePort();", " println(\"basePort=\" + basePort + \", jmxPort=\" + jmxPort +", " \", bogusPort=\" + bogusPort);" ] }, { "added": [], "header": "@@ -1041,7 +1042,6 @@ public final class TestConfiguration {", "removed": [ " this.bogusPort = copy.bogusPort;" ] }, { "added": [], "header": "@@ -1065,7 +1065,6 @@ public final class TestConfiguration {", "removed": [ " this.bogusPort = copy.bogusPort;" ] }, { "added": [], "header": "@@ -1093,7 +1092,6 @@ public final class TestConfiguration {", "removed": [ " this.bogusPort = copy.bogusPort;" ] }, { "added": [], "header": "@@ -1130,7 +1128,6 @@ public final class TestConfiguration {", "removed": [ " this.bogusPort = copy.bogusPort;" ] }, { "added": [], "header": "@@ -1194,7 +1191,6 @@ public final class TestConfiguration {", "removed": [ " this.bogusPort = copy.bogusPort;" ] }, { "added": [ " println(\"basePort=\" + basePort + \", bogusPort=\" + bogusPort +", " \", jmxPort=\" + jmxPort);" ], "header": "@@ -1224,9 +1220,8 @@ public final class TestConfiguration {", "removed": [ " bogusPort = getNextAvailablePort();", " println(\"basePort=\" + basePort + \", jmxPort=\" + jmxPort +", " \", bogusPort=\" + bogusPort);" ] }, { "added": [], "header": "@@ -1745,7 +1740,6 @@ public final class TestConfiguration {", "removed": [ " private final int bogusPort;" ] } ] } ]
derby-DERBY-4704-0859d5b4
DERBY-4704: Make casts of strings to booleans always nullable. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@956025 13f79535-47bb-0310-9956-ffa450edef68
[]
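A hedged sketch of how the changed nullability could be observed through JDBC metadata; the table and column names are invented for illustration, and it assumes a Derby version that has the BOOLEAN type:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSetMetaData;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class CastNullabilitySketch {
        static void show(Connection conn) throws SQLException {
            Statement s = conn.createStatement();
            s.executeUpdate("CREATE TABLE T(V VARCHAR(5) NOT NULL)");
            PreparedStatement ps =
                conn.prepareStatement("SELECT CAST(V AS BOOLEAN) FROM T");
            ResultSetMetaData rsmd = ps.getMetaData();
            // Even though V is declared NOT NULL, the result of the cast is
            // reported as nullable after this change.
            System.out.println("nullable = "
                + (rsmd.isNullable(1) == ResultSetMetaData.columnNullable));
        }
    }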
derby-DERBY-4704-8260425f
DERBY-4704: Incorrect nullability when casting non-nullable VARCHAR to BOOLEAN Added test case. Disabled for now. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@955900 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4706-ccfada30
DERBY-4706: Remove stale and potentially unused code Request.writeEncryptedScalarStream Removed the method writeEncryptedScalarStream in Request, and a number of methods only used by it. The other methods differ from their "normal" counterparts only by writing into a new buffer instead of the transfer buffer (i.e. typically because what is written has to be encoded before being placed onto the transfer buffer). Patch file: derby-4706-1a-remove_writeEncryptedScalarStream_and_friends.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@956569 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/net/Request.java", "hunks": [ { "added": [ " // DERBY-4706", " // The network server doesn't support the security mechanisms above.", " // Further, the code in writeEncryptedScalarStream is/was in a bad", " // state.", " // Throw an exception for now until we're positive the code can be", " // ditched, later this comment/code itself can also be removed.", " throw new SqlException(netAgent_.logWriter_,", " new ClientMessageId(SQLState.NOT_IMPLEMENTED),", " \"encrypted scalar streams\");" ], "header": "@@ -252,14 +252,15 @@ public class Request {", "removed": [ "\t\t\t", "\t\t\twriteEncryptedScalarStream(chained,", "\t\t\t\t\t\t\t\t\t chainedWithSameCorrelator,", "\t\t\t\t\t\t\t\t\t codePoint,", "\t\t\t\t\t\t\t\t\t length,", "\t\t\t\t\t\t\t\t\t in,", "\t\t\t\t\t\t\t\t\t writeNullByte,", "\t\t\t\t\t\t\t\t\t parameterIndex);" ] }, { "added": [], "header": "@@ -275,173 +276,6 @@ public class Request {", "removed": [ " ", " // We need to reuse the agent's sql exception accumulation mechanism", " // for this write exception, pad if the length is too big, and truncation if the length is too small", " // WARNING: The code encrypting EXTDTA still has the problems described by", " // DERBY-2017. The server doesn't support this security mechanism", " // (see for instance DERBY-1345), and it is not clear whether this", " // piece of code is ever used.", " final private void writeEncryptedScalarStream(boolean chained,", " boolean chainedWithSameCorrelator,", " int codePoint,", " int length,", " java.io.InputStream in,", " boolean writeNullByte,", " int parameterIndex) throws DisconnectException, SqlException {", "\t\tint leftToRead = length;", "\t\tint extendedLengthByteCount = prepScalarStream(chained,", "\t\t\t\t\t\t\t\t\t\t\t\t\t chainedWithSameCorrelator,", "\t\t\t\t\t\t\t\t\t\t\t\t\t writeNullByte,", "\t\t\t\t\t\t\t\t\t\t\t\t\t leftToRead);", "\t\tint bytesToRead;", "", "\t\tif (writeNullByte) {", "\t\t\tbytesToRead = Math.min(leftToRead, DssConstants.MAX_DSS_LEN - 6 - 4 - 1 - extendedLengthByteCount);", "\t\t} else {", "\t\t\tbytesToRead = Math.min(leftToRead, DssConstants.MAX_DSS_LEN - 6 - 4 - extendedLengthByteCount);", "\t\t}", "\t\t\t", "\t\tbyte[] lengthAndCodepoint;", "\t\tlengthAndCodepoint = buildLengthAndCodePointForEncryptedLob(codePoint,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tleftToRead,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\twriteNullByte,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\textendedLengthByteCount);", "", "", "", "\t\t// we need to stream the input, rather than fully materialize it", "\t\t// write the data", "", "\t\tbyte[] clearedBytes = new byte[leftToRead];", "\t\tint bytesRead = 0;", "\t\tint totalBytesRead = 0;", "\t\tint pos = 0;", "\t\tdo {", "\t\t\ttry {", "\t\t\t\tbytesRead = in.read(clearedBytes, pos, leftToRead);", "\t\t\t\ttotalBytesRead += bytesRead;", "\t\t\t} catch (java.io.IOException e) {", " padScalarStreamForError(leftToRead, bytesToRead,", " false, (byte)-1);", "\t\t\t\t// set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.", "\t\t\t\tnetAgent_.accumulateReadException(new SqlException(netAgent_.logWriter_,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new ClientMessageId(SQLState.NET_IOEXCEPTION_ON_READ),", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new Integer(parameterIndex), e.getMessage(), e));", "\t\t\t\treturn;", "\t\t\t}", "\t\t\tif (bytesRead == -1) {", "\t\t\t\t//padScalarStreamForError(leftToRead, bytesToRead);", "\t\t\t\t// set with SQLSTATE 01004: The value of a string was truncated 
when assigned to a host variable.", "\t\t\t\t/*throw new SqlException(netAgent_.logWriter_,", "\t\t\t\t \"End of Stream prematurely reached while reading InputStream, parameter #\" +", "\t\t\t\t parameterIndex +", "\t\t\t\t \". Remaining data has been padded with 0x0.\");*/", "\t\t\t\t//is it OK to do a chain break Exception here. It's not good to", "\t\t\t\t//pad it with 0 and encrypt and send it to the server because it takes too much time", "\t\t\t\t//can't just throw a SQLException either because some of the data PRPSQLSTT etc have already", "\t\t\t\t//been sent to the server, and server is waiting for EXTDTA, server hangs for this.", "\t\t\t\tnetAgent_.accumulateChainBreakingReadExceptionAndThrow(", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new DisconnectException(netAgent_,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new ClientMessageId(SQLState.NET_PREMATURE_EOS_DISCONNECT),", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new Integer(parameterIndex)));", "\t\t\t\treturn;", "", "\t\t\t\t/*netAgent_.accumulateReadException(", "\t\t\t\t new SqlException(netAgent_.logWriter_,", "\t\t\t\t \"End of Stream prematurely reached while reading InputStream, parameter #\" +", "\t\t\t\t parameterIndex +", "\t\t\t\t \". Remaining data has been padded with 0x0.\"));", "\t\t\t\t return;*/", "\t\t\t} else {", "\t\t\t\tpos += bytesRead;", "\t\t\t\t//offset_ += bytesRead; //comment this out for data stream encryption.", "\t\t\t\tleftToRead -= bytesRead;", "\t\t\t}", "", "\t\t} while (leftToRead > 0);", "", "\t\t// check to make sure that the specified length wasn't too small", "\t\ttry {", "\t\t\tif (in.read() != -1) {", "\t\t\t\t// set with SQLSTATE 01004: The value of a string was truncated when assigned to a host variable.", "\t\t\t\tnetAgent_.accumulateReadException(new SqlException(", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t netAgent_.logWriter_,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new ClientMessageId(SQLState.NET_INPUTSTREAM_LENGTH_TOO_SMALL),", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new Integer(parameterIndex)));", "\t\t\t}", "\t\t} catch (java.io.IOException e) {", "\t\t\tnetAgent_.accumulateReadException(new SqlException(", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t netAgent_.logWriter_,", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new ClientMessageId(", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t SQLState.NET_IOEXCEPTION_ON_STREAMLEN_VERIFICATION),", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t new Integer(parameterIndex), ", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t e.getMessage(), ", "\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t e));", "\t\t}", "", "\t\tbyte[] newClearedBytes = new byte[clearedBytes.length +", "\t\t\t\t\t\t\t\t\t\t lengthAndCodepoint.length];", "\t\tSystem.arraycopy(lengthAndCodepoint, 0, newClearedBytes, 0,", "\t\t\t\t\t\t lengthAndCodepoint.length);", "\t\tSystem.arraycopy(clearedBytes, 0, newClearedBytes, lengthAndCodepoint.length, clearedBytes.length);", "\t\t//it's wrong here, need to add in the real length after the codepoing 146c", "\t\tbyte[] encryptedBytes;", "\t\tencryptedBytes = netAgent_.netConnection_.getEncryptionManager().", "\t\t\tencryptData(newClearedBytes,", "\t\t\t\t\t\tNetConfiguration.SECMEC_EUSRIDPWD,", "\t\t\t\t\t\tnetAgent_.netConnection_.getTargetPublicKey(),", "\t\t\t\t\t\tnetAgent_.netConnection_.getTargetPublicKey());", "", "\t\tint encryptedBytesLength = encryptedBytes.length;", "\t\tint sendingLength = bytes_.length - offset_;", "\t\tif (encryptedBytesLength > (bytes_.length - offset_)) {", "", "\t\t\tSystem.arraycopy(encryptedBytes, 0, bytes_, offset_, (bytes_.length - offset_));", "\t\t\toffset_ = 32767;", 
"\t\t\ttry {", "\t\t\t\tsendBytes(netAgent_.getOutputStream());", "\t\t\t} catch (java.io.IOException ioe) {", "\t\t\t\tnetAgent_.throwCommunicationsFailure(ioe);", "\t\t\t}", "\t\t} else {", "\t\t\tSystem.arraycopy(encryptedBytes, 0, bytes_, offset_, encryptedBytesLength);", "\t\t\toffset_ = offset_ + encryptedBytes.length;", "\t\t}", "", "\t\tencryptedBytesLength = encryptedBytesLength - sendingLength;", "\t\twhile (encryptedBytesLength > 0) {", "\t\t\t//dssLengthLocation_ = offset_;", "\t\t\toffset_ = 0;", "", "\t\t\tif ((encryptedBytesLength - 32765) > 0) {", "\t\t\t\tbytes_[offset_++] = (byte) (0xff);", "\t\t\t\tbytes_[offset_++] = (byte) (0xff);", "\t\t\t\tSystem.arraycopy(encryptedBytes, sendingLength, bytes_, offset_, 32765);", "\t\t\t\tencryptedBytesLength -= 32765;", "\t\t\t\tsendingLength += 32765;", "\t\t\t\toffset_ = 32767;", "\t\t\t\ttry {", "\t\t\t\t\tsendBytes(netAgent_.getOutputStream());", "\t\t\t\t} catch (java.io.IOException ioe) {", "\t\t\t\t\tnetAgent_.throwCommunicationsFailure(ioe);", "\t\t\t\t}", "\t\t\t} else {", "\t\t\t\tint leftlength = encryptedBytesLength + 2;", "\t\t\t\tbytes_[offset_++] = (byte) ((leftlength >>> 8) & 0xff);", "\t\t\t\tbytes_[offset_++] = (byte) (leftlength & 0xff);", "", "\t\t\t\tSystem.arraycopy(encryptedBytes, sendingLength, bytes_, offset_, encryptedBytesLength);", "", "\t\t\t\toffset_ += encryptedBytesLength;", "\t\t\t\tdssLengthLocation_ = offset_;", "\t\t\t\tencryptedBytesLength = 0;", "\t\t\t}", "", "\t\t}", " }", "\t", "\t" ] }, { "added": [], "header": "@@ -870,16 +704,6 @@ public class Request {", "removed": [ " private final byte[] writeExtendedLengthBytesForEncryption(int extendedLengthByteCount, long length) {", " int shiftSize = (extendedLengthByteCount - 1) * 8;", " byte[] extendedLengthBytes = new byte[extendedLengthByteCount];", " for (int i = 0; i < extendedLengthByteCount; i++) {", " extendedLengthBytes[i] = (byte) ((length >>> shiftSize) & 0xff);", " shiftSize -= 8;", " }", " return extendedLengthBytes;", " }", "" ] }, { "added": [], "header": "@@ -1283,16 +1107,6 @@ public class Request {", "removed": [ " final byte[] writeEXTDTALengthCodePointForEncryption(int length, int codePoint) {", " //how to encure length and offset later?", " byte[] clearedBytes = new byte[4];", " clearedBytes[0] = (byte) ((length >>> 8) & 0xff);", " clearedBytes[1] = (byte) (length & 0xff);", " clearedBytes[2] = (byte) ((codePoint >>> 8) & 0xff);", " clearedBytes[3] = (byte) (codePoint & 0xff);", " return clearedBytes;", " }", "" ] }, { "added": [], "header": "@@ -1762,49 +1576,6 @@ public class Request {", "removed": [ "", " private byte[] buildLengthAndCodePointForEncryptedLob(int codePoint,", " int leftToRead,", " boolean writeNullByte,", " int extendedLengthByteCount) throws DisconnectException {", " byte[] lengthAndCodepoint = new byte[4];", " byte[] extendedLengthBytes = new byte[extendedLengthByteCount];", "", " if (extendedLengthByteCount > 0) {", " // method should never ensure length", " lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(0x8004 + extendedLengthByteCount, codePoint);", "", " if (writeNullByte) {", "", " extendedLengthBytes = writeExtendedLengthBytesForEncryption(extendedLengthByteCount, leftToRead + 1);", " } else {", " extendedLengthBytes = writeExtendedLengthBytesForEncryption(extendedLengthByteCount, leftToRead);", " }", " } else {", " if (writeNullByte) {", " lengthAndCodepoint = writeEXTDTALengthCodePointForEncryption(leftToRead + 4 + 1, codePoint);", " } else {", " lengthAndCodepoint = 
writeEXTDTALengthCodePointForEncryption(leftToRead + 4, codePoint);", " }", " }", "", " if (extendedLengthByteCount > 0) {", " byte[] newLengthAndCodepoint = new byte[4 + extendedLengthBytes.length];", " System.arraycopy(lengthAndCodepoint, 0, newLengthAndCodepoint, 0, lengthAndCodepoint.length);", " System.arraycopy(extendedLengthBytes, 0, newLengthAndCodepoint, lengthAndCodepoint.length, extendedLengthBytes.length);", " lengthAndCodepoint = newLengthAndCodepoint;", " }", "", " if (writeNullByte) {", " byte[] nullByte = new byte[1 + lengthAndCodepoint.length];", " System.arraycopy(lengthAndCodepoint, 0, nullByte, 0, lengthAndCodepoint.length);", " nullByte[lengthAndCodepoint.length] = 0;", " lengthAndCodepoint = nullByte;", " }", " return lengthAndCodepoint;", " }", "", "" ] } ] } ]
derby-DERBY-4709-155fa10b
DERBY-4709: Create test that parses the client trace file to detect round-trips for DERBY-4653. Alternative test for the commit/rollback flow optimization, which parses the DRDA protocol flow log to verify that a commit/rollback flows over the network only when required. The test is very general; it does not test correct commit/rollback behavior for specific database interaction sequences. Patch file: derby-4709-1c-alternative_test.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@963243 13f79535-47bb-0310-9956-ffa450edef68
[]
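A rough sketch of the kind of scanning such a test could do, assuming the client trace logs one recognizable DRDA code point name per flow (for example the token RDBCMM for a commit request); the token and the file handling here are assumptions, not the actual test code:

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;

    public class TraceScanSketch {
        // Count how many times a given DRDA token (e.g. "RDBCMM") appears in
        // the client trace file, i.e. how often a commit flowed to the server.
        static int countToken(String traceFile, String token) throws IOException {
            int count = 0;
            BufferedReader in = new BufferedReader(new FileReader(traceFile));
            try {
                String line;
                while ((line = in.readLine()) != null) {
                    if (line.contains(token)) {
                        count++;
                    }
                }
            } finally {
                in.close();
            }
            return count;
        }
    }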
derby-DERBY-471-8ff0b23f
DERBY-471 Correct DatabaseMetaData.getTimeDateFunctions to correctly return a valid subset of the JDBC/ODBC escaped functions that Derby supports. Expanded existing tests for time date functions. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@348330 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-471-c84cf384
DERBY-471 Correct DatabaseMetaData.getNumericFunctions, getSystemFunctions and getStringFunctions to correctly return a valid subset of the JDBC/ODBC escaped functions that Derby supports. Added tests that ensure the functions listed by Derby are supported, and that all the functions defined by the spec and supported by Derby are listed by Derby. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@348289 13f79535-47bb-0310-9956-ffa450edef68
[]
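A small sketch showing how the four escape-function lists can be inspected through standard JDBC; verifying that each listed function actually works would additionally require invoking it with suitable arguments through the {fn ...} escape syntax:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.SQLException;

    public class EscapeFunctionListsSketch {
        static void printLists(Connection conn) throws SQLException {
            DatabaseMetaData dmd = conn.getMetaData();
            // Each call returns a comma-separated list of JDBC/ODBC escaped
            // functions that the driver claims to support.
            System.out.println("numeric:  " + dmd.getNumericFunctions());
            System.out.println("string:   " + dmd.getStringFunctions());
            System.out.println("system:   " + dmd.getSystemFunctions());
            System.out.println("timedate: " + dmd.getTimeDateFunctions());
        }
    }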
derby-DERBY-4711-780c0c3d
DERBY-4711: Hung thread after another thread is interrupted If a thread fails while waiting for a lock, remove that thread from the queue. Patch contributed by Luke Quinane <luke@nuix.com>. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@957902 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/locks/LockSet.java", "hunks": [ { "added": [ " byte wakeupReason = 0;", " try {", " wakeupReason = waitingLock.waitForGrant(actualTimeout);", " } catch(StandardException e) {", " nextWaitingLock = control.getNextWaiter(waitingLock, true, this);", " throw e;", " }", "" ], "header": "@@ -273,12 +273,18 @@ final class LockSet implements LockTable {", "removed": [ " byte wakeupReason = waitingLock.waitForGrant(actualTimeout);", " " ] } ] } ]
derby-DERBY-4712-a823c6a8
DERBY-4712 Complex nested joins problems Patch DERBY-4712b, which removes one of the sources of the NPE seen by the reporter. The other is covered by DERBY-4798. A corner case: the patch makes an inner join which decides it is not flattenable propagate this fact down to any nested outer join nodes containing nested inner joins; those inner joins would otherwise think they are flattenable (the a priori value for inner joins). Adds new test cases. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@997325 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4713-70c969f3
DERBY-4713: Subclasses of ScriptTestCase cannot run correctly with a non-English default locale. Committing patch derby-4713-2, contributed by Yun Lee (yun dot lee dot bj at gmail dot com). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1055998 13f79535-47bb-0310-9956-ffa450edef68
[]
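One way to reproduce such failures outside the harness is to force a non-English default locale around the suite; a minimal sketch, with the restore step in a finally block (the suite invocation itself is a placeholder):

    import java.util.Locale;

    public class LocaleSketch {
        static void runUnderLocale(Runnable suite, Locale testLocale) {
            Locale saved = Locale.getDefault();
            Locale.setDefault(testLocale);   // e.g. new Locale("de", "DE")
            try {
                suite.run();                 // run the ScriptTestCase-based tests
            } finally {
                Locale.setDefault(saved);    // always restore the original locale
            }
        }
    }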
derby-DERBY-4715-6a108f11
DERBY-4715 Write JVM information and the path of derby.jar to derby.log Contributed by Lily Wei (lily wei at yahoo dot com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@965647 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java", "hunks": [ { "added": [ "import java.io.UnsupportedEncodingException;", "", "import java.net.URL;", "import java.net.URLDecoder;", "import java.security.CodeSource;" ], "header": "@@ -91,8 +91,13 @@ import java.util.Enumeration;", "removed": [] }, { "added": [ "\t", "\tprivate String jvmVersion;", "\t", "\tprivate String jarCPath;" ], "header": "@@ -137,6 +142,10 @@ public class BaseDataFileFactory", "removed": [] }, { "added": [ "\t\t", "\t\tjvmVersion = buildJvmVersion();", "\t\t", "\t\tjarCPath = jarClassPath(getClass());" ], "header": "@@ -261,6 +270,10 @@ public class BaseDataFileFactory", "removed": [] }, { "added": [ " (Object) this.getClass().getClassLoader(),", " dataDirectory + \" \" + readOnlyMsg ", " +\"\\nLoaded from \" + jarCPath + \"\\n\" +jvmVersion" ], "header": "@@ -360,9 +373,10 @@ public class BaseDataFileFactory", "removed": [ " dataDirectory + \" \" + readOnlyMsg,", " (Object) this.getClass().getClassLoader()" ] } ] } ]
derby-DERBY-4717-811c29d0
DERBY-4717: Driver trace file isn't closed/released on physical connection close when specified with the traceFile attribute/setter Made Derby close the trace file specified as 'traceFile' (data source setter or as a connection URL attribute) when the physical connection is closed. Patch file: derby-4717-1b.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@959550 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/jdbc/ClientBaseDataSource.java", "hunks": [ { "added": [ " if (printWriter != logWriter &&", " (traceDirectory != null || traceFile != null))" ], "header": "@@ -621,7 +621,8 @@ public abstract class ClientBaseDataSource implements Serializable, Referenceabl", "removed": [ " if (printWriter != logWriter && traceDirectory != null)" ] } ] } ]
derby-DERBY-4723-19311603
DERBY-4723: Using an instance lock to protect static shared data in EmbedPooledConnection Removed code using incorrect synchronization, as it was used for tracing only and the hashCode() output should suffice for that. Patch file: derby-4723-1a-remove_code.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@980089 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/jdbc/EmbedPooledConnection.java", "hunks": [ { "added": [], "header": "@@ -64,13 +64,6 @@ import javax.sql.ConnectionEvent;", "removed": [ "", " /** Static counter for connection ids */", " private static int idCounter = 0;", " ", " /** The id for this connection. */", " private int connectionId;", " " ] }, { "added": [], "header": "@@ -105,15 +98,8 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne", "removed": [ " private synchronized int nextId()", " {", " return idCounter++;", " }", "", " connectionId = nextId();", "" ] }, { "added": [], "header": "@@ -560,7 +546,6 @@ class EmbedPooledConnection implements javax.sql.PooledConnection, BrokeredConne", "removed": [ " \"(ID = \" + connectionId + \"), \" +" ] } ] } ]
derby-DERBY-4729-eaa0d8cf
DERBY-4729 add more information to the XACT_PROTOCOL_VIOLATION returned from store. When this error is reported it is often hard to reproduce, so this change adds printing of the internal state of the Transaction when it is hit. Hopefully this helps in understanding what is going on when the error is hit. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1471079 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/xact/Xact.java", "hunks": [ { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -757,7 +757,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] }, { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -929,7 +930,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] }, { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -1041,7 +1043,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] }, { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -1761,7 +1764,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] }, { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -1821,7 +1825,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] }, { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -1880,7 +1885,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] }, { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -2575,7 +2581,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] }, { "added": [ " SQLState.XACT_PROTOCOL_VIOLATION_DETAILED, ", " toInternalDetailString());" ], "header": "@@ -2587,7 +2594,8 @@ public class Xact extends RawTransaction implements Limit, LockOwner {", "removed": [ " SQLState.XACT_PROTOCOL_VIOLATION);" ] } ] }, { "file": "java/shared/org/apache/derby/shared/common/reference/SQLState.java", "hunks": [ { "added": [ "\tString XACT_PROTOCOL_VIOLATION_DETAILED = \"40XT8\";" ], "header": "@@ -453,6 +453,7 @@ public interface SQLState {", "removed": [] } ] } ]
derby-DERBY-4730-1292a7f9
DERBY-4730: Add BOOLEAN to list of datatypes returned by DatabaseMetaData.getTypeInfo(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@961497 13f79535-47bb-0310-9956-ffa450edef68
[]
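A minimal check, using only standard JDBC calls, that BOOLEAN now shows up in the type info; the TYPE_NAME column is defined by the JDBC specification:

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class TypeInfoSketch {
        static boolean typeInfoListsBoolean(Connection conn) throws SQLException {
            ResultSet rs = conn.getMetaData().getTypeInfo();
            try {
                while (rs.next()) {
                    if ("BOOLEAN".equals(rs.getString("TYPE_NAME"))) {
                        return true;
                    }
                }
                return false;
            } finally {
                rs.close();
            }
        }
    }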
derby-DERBY-4731-2c70a31f
DERBY-4731 XA two phase commit with active GLOBAL TEMPORARY TABLE causes an internal error identified by the RawStore module. For temp tables declared as follows, Derby needs to do special work at commit time to arrange for the temporary table to have no rows once the commit completes: DECLARE GLOBAL TEMPORARY TABLE SESSION.T1 ( XWSID INT) ON COMMIT DELETE ROWS NOT LOGGED ON ROLLBACK DELETE ROWS. Derby implements these temporary tables as unlogged real internal tables with backing containers on disk. The DDL is all in memory so that they are only seen by the current session. On commit the underlying container is dropped and a new empty container is created. This all works fine except in the XA case. In this case the transaction has done real updates (temp table updates are unlogged and thus not seen as update operations from the XA point of view), then the transaction executes an XA prepare followed by an XA commit. No update transactions are allowed between the prepare and the commit. The problem is that the pre-commit work done for the temp tables was executing updates on the internal containers (dropping and creating new ones), and raw store identified this as an XA protocol violation. Since the work consists only of internal, non-XA-transaction-related updates, it is ok to do it between the prepare and commit. The fix arranges for this work to be done in a nested updatable user transaction when in an XA transaction. It is ok to commit this work independently from the parent user transaction because for XA it does the right thing in both possible cases: 1) If the XA transaction commits successfully then the same work has been done. Because of where it is done in the code, the committing user can never get access to the global temp tables between the time the nested xact commits and the XA transaction commits. 2) If the XA transaction fails to commit somehow, then one of two things will happen: a) the session will go away, and then it does not matter what happens to the session-lifetime objects. b) the transaction will roll back, and the on-commit work which deleted the rows is the same work that needs to get done on rollback. The only locks the nested transaction gets are on the newly created containers, so there should be no problem with lock contention between this work and either the parent transaction or any other transactions. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@961511 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ " * Do the necessary work at commit time for temporary tables", " * <p>", " * 3)After savepoint fix up, then handle all ON COMMIT DELETE ROWS with", " * no open held cursor temp tables.", " * <p>", " *", " * @param in_xa_transaction if true, then transaction is an XA transaction,", " * and special nested transaction may be necessary", " * to cleanup internal containers supporting the", " * temp tables at commit time.", " *", " * @exception StandardException Standard exception policy.", " **/", " private void tempTablesAndCommit(boolean in_xa_transaction) ", " throws StandardException", " // loop through all declared global temporary tables and determine", " // what to do at commit time based on if they were dropped during", " // the current savepoint level." ], "header": "@@ -664,14 +664,29 @@ public class GenericLanguageConnectionContext", "removed": [ " * do the necessary work at commit time for temporary tables", " */", " private void tempTablesAndCommit() " ] }, { "added": [ "", " // at commit time, for all the temp tables declared with ", " // ON COMMIT DELETE ROWS, make sure there are no held cursor open", " // on them.", " // If there are no held cursors open on ON COMMIT DELETE ROWS, ", " // drop those temp tables and redeclare them to get rid of all the ", " // data in them", "", " // in XA use nested user updatable transaction. Delay creating", " // the transaction until loop below finds one it needs to ", " // process.", " TransactionController xa_tran = null; ", " TransactionController tran_for_drop = ", " (in_xa_transaction ? null : getTransactionExecute());", "", " try", " {", " for (int i=0; i<allDeclaredGlobalTempTables.size(); i++)", " {", " TableDescriptor td = ", " ((TempTableInfo) (allDeclaredGlobalTempTables.", " get(i))).getTableDescriptor();", " if (td.isOnCommitDeleteRows() == false) ", " {", " // do nothing for temp table with ON COMMIT PRESERVE ROWS", " continue;", " }", " else if (checkIfAnyActivationHasHoldCursor(td.getName()) == ", " false)", " {", " // temp tables with ON COMMIT DELETE ROWS and ", " // no open held cursors", " getDataDictionary().getDependencyManager().invalidateFor(", " td, DependencyManager.DROP_TABLE, this);", "", " // handle delayed creation of nested xact for XA.", " if (in_xa_transaction)", " {", " if (xa_tran == null)", " {", " xa_tran = ", " getTransactionExecute().", " startNestedUserTransaction(false);", " tran_for_drop = xa_tran;", " }", " }", "", " cleanupTempTableOnCommitOrRollback(tran_for_drop, td, true);", " }", " }", " }", " finally", " {", " // if we created a nested user transaction for XA get rid of it.", " if (xa_tran != null)", " {", " xa_tran.destroy();", " }", " }" ], "header": "@@ -694,6 +709,65 @@ public class GenericLanguageConnectionContext", "removed": [] }, { "added": [ " td = cleanupTempTableOnCommitOrRollback(", " getTransactionExecute(), td, false);" ], "header": "@@ -870,7 +944,8 @@ public class GenericLanguageConnectionContext", "removed": [ " td = cleanupTempTableOnCommitOrRollback(td, false);" ] }, { "added": [ " cleanupTempTableOnCommitOrRollback(", " getTransactionExecute(), td, true);" ], "header": "@@ -901,7 +976,8 @@ public class GenericLanguageConnectionContext", "removed": [ " cleanupTempTableOnCommitOrRollback(td, true);" ] }, { "added": [ "", " throw StandardException.newException(", " SQLState.LANG_NO_COMMIT_IN_NESTED_CONNECTION);" ], "header": "@@ -1357,12 +1433,14 @@ public 
class GenericLanguageConnectionContext", "removed": [ " throw StandardException.newException(SQLState.LANG_NO_COMMIT_IN_NESTED_CONNECTION);" ] }, { "added": [ " // Do clean up work required for temporary tables at commit time. ", " tempTablesAndCommit(commitflag != NON_XA);" ], "header": "@@ -1389,50 +1467,12 @@ public class GenericLanguageConnectionContext", "removed": [ " // do the clean up work required for temporary tables at the commit ", " // time. This cleanup work can possibly remove entries from ", " // allDeclaredGlobalTempTables and that's why we need to check", " // again later to see if we there are still any entries in ", " // allDeclaredGlobalTempTables", " tempTablesAndCommit();", "", " // at commit time, for all the temp tables declared with ", " // ON COMMIT DELETE ROWS, make sure there are no held cursor open", " // on them.", " // If there are no held cursors open on ON COMMIT DELETE ROWS, ", " // drop those temp tables and redeclare them to get rid of all the ", " // data in them", "", " if (allDeclaredGlobalTempTables != null) ", " {", " for (int i=0; i<allDeclaredGlobalTempTables.size(); i++)", " {", " TableDescriptor td = ", " ((TempTableInfo)", " (allDeclaredGlobalTempTables.get(i))).getTableDescriptor();", " if (td.isOnCommitDeleteRows() == false) ", " {", " //do nothing for temp table with ON COMMIT PRESERVE ROWS", " continue;", " }", "", " if (checkIfAnyActivationHasHoldCursor(td.getName()) == ", " false)", " {", " // temp tables with ON COMMIT DELETE ROWS and ", " // no open held cursors", " getDataDictionary().getDependencyManager().invalidateFor(", " td, DependencyManager.DROP_TABLE, this);", "", " cleanupTempTableOnCommitOrRollback(td, true);", " }", " }", " }", "" ] }, { "added": [ " TransactionController tc,", " TableDescriptor td, ", " boolean dropAndRedeclare)", " tc.createConglomerate(" ], "header": "@@ -1505,14 +1545,15 @@ public class GenericLanguageConnectionContext", "removed": [ " TableDescriptor td, ", " boolean dropAndRedeclare)", " tran.createConglomerate(" ] } ] } ]
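The failing sequence, reduced to a hedged sketch (throwaway Xid, in-memory database name and missing error handling are all illustrative assumptions); before the fix the two-phase commit at the end tripped over the temp-table cleanup, which raw store reported as an XA protocol violation:

    import java.sql.Connection;
    import java.sql.Statement;
    import javax.sql.XAConnection;
    import javax.transaction.xa.XAResource;
    import javax.transaction.xa.Xid;
    import org.apache.derby.jdbc.EmbeddedXADataSource;

    public class XATempTableSketch {
        public static void main(String[] args) throws Exception {
            EmbeddedXADataSource ds = new EmbeddedXADataSource();
            ds.setDatabaseName("memory:xaTempDb");   // assumes the in-memory back end
            ds.setCreateDatabase("create");

            XAConnection xac = ds.getXAConnection();
            XAResource xar = xac.getXAResource();
            Xid xid = new Xid() {                    // throwaway Xid, illustration only
                public int getFormatId() { return 0x1234; }
                public byte[] getGlobalTransactionId() { return new byte[] { 1 }; }
                public byte[] getBranchQualifier() { return new byte[] { 2 }; }
            };

            xar.start(xid, XAResource.TMNOFLAGS);
            Connection conn = xac.getConnection();
            Statement s = conn.createStatement();
            s.executeUpdate("DECLARE GLOBAL TEMPORARY TABLE SESSION.T1 (XWSID INT) "
                    + "ON COMMIT DELETE ROWS NOT LOGGED ON ROLLBACK DELETE ROWS");
            s.executeUpdate("INSERT INTO SESSION.T1 VALUES 1");
            xar.end(xid, XAResource.TMSUCCESS);

            xar.prepare(xid);        // no further updates allowed in this branch ...
            xar.commit(xid, false);  // ... yet the temp-table cleanup happens here
        }
    }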
derby-DERBY-4731-2d08e7af
DERBY-4731 Previous change did not completely solve the problem. There is a problem with the initial fix; it is not clear why it only showed an error in the 10.3 backport. The temporary tables are stored in the transaction context, so using a nested transaction creates a new context and the work there is not reflected in the parent context. Thus the previous fix which used a nested transaction does not work. This patch delays the work on the XA global temporary tables until after the xa transaction has committed but before control returns to the client executing the commit. The delay solves the issue of attempting to do work in a prepared transaction. It drops all global temporary tables following an XA commit. This gives us consistent behavior between embedded and network server implementations. At the time of an xa end the system "Ends the work performed on behalf of a transaction branch. The resource manager disassociates the XA resource from the transaction branch specified and lets the transaction complete." Given this description of the behavior of XAResource it seems reasonable to document that global temporary tables are not supported across the XA commit boundary. In the worst case one might be connecting to a resource in a completely different database, while the temp table implementation is tied to the transaction context in a single connection to an existing server. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@965317 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ " ", " for (int i=0; i<allDeclaredGlobalTempTables.size(); i++)", " TableDescriptor td = ", " ((TempTableInfo) (allDeclaredGlobalTempTables.", " get(i))).getTableDescriptor();", " if (td.isOnCommitDeleteRows() == false) ", " // do nothing for temp table with ON COMMIT PRESERVE ROWS", " continue;", " }", " else if (checkIfAnyActivationHasHoldCursor(td.getName()) == ", " false)", " {", " // temp tables with ON COMMIT DELETE ROWS and ", " // no open held cursors", " getDataDictionary().getDependencyManager().invalidateFor(", " td, DependencyManager.DROP_TABLE, this);", " if (!in_xa_transaction)", " {", " // delay physical cleanup to after the commit for XA", " // transactions. In XA the transaction is likely in", " // prepare state at this point and physical changes to", " // store are not allowed until after the commit.", " // Do the work here for non-XA so that fast path does", " // have to do the 2 commits that the XA path will.", " cleanupTempTableOnCommitOrRollback(td, true);", " }", "", " private void tempTablesXApostCommit() ", " throws StandardException", " {", " TransactionController tc = getTransactionExecute();", "", " // at commit time for an XA transaction drop all temporary tables.", " // A transaction context may not be maintained from one", " // XAResource.xa_commit to the next in the case of XA with", " // network server and thus there is no way to get at the temp", " // tables again. To provide consistent behavior in embedded vs", " // network server, consistently remove temp tables at XA commit", " // transaction boundary.", " for (int i=0; i < allDeclaredGlobalTempTables.size(); i++)", " // remove all temp tables from this context.", " TableDescriptor td = ", " ((TempTableInfo) ", " (allDeclaredGlobalTempTables.get(i))).getTableDescriptor();", "", " //remove the conglomerate created for this temp table", " tc.dropConglomerate(td.getHeapConglomerateId()); ", "", " //remove it from the list of temp tables", " allDeclaredGlobalTempTables.remove(i); ", "", " tc.commit();" ], "header": "@@ -720,54 +720,66 @@ public class GenericLanguageConnectionContext", "removed": [ " TransactionController xa_tran = null; ", " TransactionController tran_for_drop = ", " (in_xa_transaction ? 
null : getTransactionExecute());", "", " try", " for (int i=0; i<allDeclaredGlobalTempTables.size(); i++)", " TableDescriptor td = ", " ((TempTableInfo) (allDeclaredGlobalTempTables.", " get(i))).getTableDescriptor();", " if (td.isOnCommitDeleteRows() == false) ", " {", " // do nothing for temp table with ON COMMIT PRESERVE ROWS", " continue;", " }", " else if (checkIfAnyActivationHasHoldCursor(td.getName()) == ", " false)", " {", " // temp tables with ON COMMIT DELETE ROWS and ", " // no open held cursors", " getDataDictionary().getDependencyManager().invalidateFor(", " td, DependencyManager.DROP_TABLE, this);", "", " // handle delayed creation of nested xact for XA.", " if (in_xa_transaction)", " {", " if (xa_tran == null)", " {", " xa_tran = ", " getTransactionExecute().", " startNestedUserTransaction(false);", " tran_for_drop = xa_tran;", " }", " }", " cleanupTempTableOnCommitOrRollback(tran_for_drop, td, true);", " finally", " // if we created a nested user transaction for XA get rid of it.", " if (xa_tran != null)", " {", " xa_tran.destroy();", " }" ] }, { "added": [ " td = cleanupTempTableOnCommitOrRollback(td, false);" ], "header": "@@ -944,8 +956,7 @@ public class GenericLanguageConnectionContext", "removed": [ " td = cleanupTempTableOnCommitOrRollback(", " getTransactionExecute(), td, false);" ] }, { "added": [ " cleanupTempTableOnCommitOrRollback(td, true);" ], "header": "@@ -976,8 +987,7 @@ public class GenericLanguageConnectionContext", "removed": [ " cleanupTempTableOnCommitOrRollback(", " getTransactionExecute(), td, true);" ] }, { "added": [ "", " // Do post commit XA temp table cleanup if necessary.", " if ((allDeclaredGlobalTempTables != null) &&", " (commitflag != NON_XA))", " {", " tempTablesXApostCommit();", " }" ], "header": "@@ -1530,6 +1540,13 @@ public class GenericLanguageConnectionContext", "removed": [] }, { "added": [ " TransactionController tc = getTransactionExecute();", "" ], "header": "@@ -1545,11 +1562,12 @@ public class GenericLanguageConnectionContext", "removed": [ " TransactionController tc," ] } ] } ]
derby-DERBY-4731-3bb8c054
DERBY-4731 XA two phase commit with active GLOBAL TEMPORARY TABLE causes an internal error identified by the RawStore module. Just checking in fixtures for the RawStore error and the Assert case to XATest. The fixtures are xtestXATempTableD4731_RawStore() and xtestXATempTableD4731_Assert and can be enabled by removing the 'x' in front. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@960136 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4731-6819a865
DERBY-4743 Attempt to access a DECLARE GLOBAL TEMPORARY TABLE after commit in an XA transaction in Network server fails. The fix for DERBY-4731 made the behavior consistent across embedded and network server. Derby does not support accessing global temporary tables declared in an XA transaction subsequent to the commit of the transaction. This change updates the junit XATest to verify that accessing the table after the commit fails as expected in both the embedded and network server case. In both cases you will get the generic table does not exist error. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@966027 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4732-52694e5d
DERBY-4732: Release system resources in CanonTestCase thoroughly Added finally block to clean up resources. Contributed by Yun Lee (yun dot lee dot bj at gmail dot com). Patch file: derby-4732.patch git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@963716 13f79535-47bb-0310-9956-ffa450edef68
[]
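The pattern involved is the ordinary try/finally release of streams; a generic sketch with made-up names, since the actual members of CanonTestCase are not reproduced here:

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;

    public class CleanupSketch {
        static String readFirstLine(String canonFile) throws IOException {
            BufferedReader in = new BufferedReader(new FileReader(canonFile));
            try {
                return in.readLine();
            } finally {
                in.close();   // released even if reading or comparing fails
            }
        }
    }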
derby-DERBY-4735-452d4674
DERBY-4735 prepare of an XA read only transaction with a declared global temporary table gets an ASSERT in SANE mode. Fixing the test case in the junit XATest.java test. Changed it to not try to do an xa commit after preparing a read only transaction. The test case still gives an ASSERT, and in my run caused a subsequent error in the testDerby966 test, which I assume came from bad error processing of the ASSERT. To enable the test just remove the x from "public void xtestXATempTableD4735_Assert()" git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@966039 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4741-2c09e2c0
DERBY-4741 Make embedded Derby work reliably in the presence of thread interrupts Patch derby-4741-testBatchInterrupt-b: It tests that an interrupt will stop a batch of statements by throwing 08000 just before we execute the next statement in the batch. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1066707 13f79535-47bb-0310-9956-ffa450edef68
[]
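A timing-dependent sketch of what such a test exercises: a worker thread runs a large batch, is interrupted, and the batch terminates with the session-level 08000 state before the next statement executes. The table name and batch size are made up:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class BatchInterruptSketch {
        static void run(Connection conn) throws Exception {
            final PreparedStatement ps =
                conn.prepareStatement("INSERT INTO T VALUES (?)");
            for (int i = 0; i < 10000; i++) {
                ps.setInt(1, i);
                ps.addBatch();
            }
            Thread worker = new Thread(new Runnable() {
                public void run() {
                    try {
                        ps.executeBatch();
                        System.out.println("batch finished before the interrupt");
                    } catch (SQLException e) {
                        // Expected when interrupted: SQLState 08000 (CONN_INTERRUPT).
                        System.out.println("SQLState: " + e.getSQLState());
                    }
                }
            });
            worker.start();
            worker.interrupt();   // interrupt between two statements of the batch
            worker.join();
        }
    }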
derby-DERBY-4741-3a6f4cf9
DERBY-4741 Make Derby work reliably in the presence of thread interrupts Patch derby-4741-a-04-api-interruptstatus. This patch contains the new helper class InterruptStatus and inserts calls to restoreIntrFlagIfSeen before API methods return and in the exception handling (TransactionResourceImpl#handleException). In addition, execution of EmbedStatement#executeBatch checks for interrupts between each statement in the batch and throws the existing 08000 (CONN_INTERRUPT - session level severity) error if interrupts are seen. Note: the machinery of InterruptStatus isn't really used to save any interrupts yet; that follows in (a) later patch(es), so this patch doesn't change behavior. The focus here is on the correct placement of calls to restoreIntrFlagIfSeen in the API. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1030630 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/jdbc/ConnectionChild.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -22,6 +22,7 @@", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedBlob.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;", "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -30,6 +30,8 @@ import org.apache.derby.iapi.types.DataValueDescriptor;", "removed": [] }, { "added": [ " EmbedConnection ec = getEmbedConnection();", " pushStack = !ec.isClosed();" ], "header": "@@ -393,7 +395,8 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " pushStack = !getEmbedConnection().isClosed();" ] }, { "added": [ "", " restoreIntrFlagIfSeen(pushStack, ec);", "" ], "header": "@@ -409,6 +412,9 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [] }, { "added": [ " if (sz == -1) {", " InterruptStatus.restoreIntrFlagIfSeen();", " }", " InterruptStatus.restoreIntrFlagIfSeen();", " EmbedConnection ec = getEmbedConnection();", " pushStack = !ec.isClosed();" ], "header": "@@ -469,19 +475,23 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " if (sz == -1)", " pushStack = !getEmbedConnection().isClosed();" ] }, { "added": [ "", " restoreIntrFlagIfSeen(pushStack, ec);", "", "", " restoreIntrFlagIfSeen(pushStack,ec);", "", "" ], "header": "@@ -501,10 +511,17 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [] }, { "added": [ " java.io.InputStream result = control.getInputStream(0);", " return result;" ], "header": "@@ -547,7 +564,8 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " return control.getInputStream(0);" ] }, { "added": [ " EmbedConnection ec = getEmbedConnection();", " pushStack = !ec.isClosed();", " UpdatableBlobStream result = new UpdatableBlobStream(", " this,", " new AutoPositioningStream (this, myStream, this));", "", " restoreIntrFlagIfSeen(pushStack, ec);", "", " return result;" ], "header": "@@ -555,15 +573,21 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " pushStack = !getEmbedConnection().isClosed();", " return new UpdatableBlobStream (this, ", " new AutoPositioningStream (this, myStream, this));" ] }, { "added": [ "" ], "header": "@@ -600,6 +624,7 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [] }, { "added": [ " EmbedConnection ec = getEmbedConnection();", "", " pushStack = !ec.isClosed();" ], "header": "@@ -612,7 +637,9 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " pushStack = !getEmbedConnection().isClosed();" ] }, { "added": [ " if (c == -1) { // run out of stream", " restoreIntrFlagIfSeen(pushStack, ec);", " }", " if (checkMatch(pattern, pos)) {", " restoreIntrFlagIfSeen(pushStack, ec);", " } else" ], "header": "@@ -624,14 +651,17 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " if (c == -1) // run out of stream", " if (checkMatch(pattern, pos))", " else" ] }, { "added": [ "", " EmbedConnection ec = getEmbedConnection();", "", " pushStack = !ec.isClosed();" ], "header": "@@ -704,9 +734,12 @@ final class EmbedBlob extends ConnectionChild implements Blob, EngineLOB", "removed": [ " pushStack = !getEmbedConnection().isClosed();" ] } ] }, { "file": 
"java/engine/org/apache/derby/impl/jdbc/EmbedClob.java", "hunks": [ { "added": [ " EmbedConnection ec = getEmbedConnection();", " pushStack = !ec.isClosed();" ], "header": "@@ -329,13 +329,14 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB", "removed": [ " pushStack = !getEmbedConnection().isClosed();" ] }, { "added": [ " if (readCount == -1) {", " restoreIntrFlagIfSeen(pushStack, ec);", " }" ], "header": "@@ -347,8 +348,10 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB", "removed": [ " if (readCount == -1)" ] }, { "added": [ " restoreIntrFlagIfSeen(pushStack, ec);" ], "header": "@@ -362,6 +365,7 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB", "removed": [] }, { "added": [ " restoreIntrFlagIfSeen(pushStack, ec);", " restoreIntrFlagIfSeen(pushStack, ec);" ], "header": "@@ -399,9 +403,11 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB", "removed": [] }, { "added": [ " EmbedConnection ec = getEmbedConnection();" ], "header": "@@ -437,6 +443,7 @@ final class EmbedClob extends ConnectionChild implements Clob, EngineLOB", "removed": [] }, { "added": [ " if (!seenOneCharacter) {", " restoreIntrFlagIfSeen(pushStack, ec);", " }", "", " restoreIntrFlagIfSeen(pushStack, ec);" ], "header": "@@ -455,9 +462,13 @@ restartScan:", "removed": [ " if (!seenOneCharacter)" ] }, { "added": [ " if (firstPosition == -1) {", " restoreIntrFlagIfSeen(pushStack, ec);", " }" ], "header": "@@ -470,8 +481,10 @@ restartScan:", "removed": [ " if (firstPosition == -1)" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedConnection.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -83,6 +83,7 @@ import java.util.Properties;", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(getLanguageConnection());", " InterruptStatus.restoreIntrFlagIfSeen();" ], "header": "@@ -600,10 +601,12 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen();", "" ], "header": "@@ -617,6 +620,8 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " LanguageConnectionContext lcc = tr.getLcc();" ], "header": "@@ -1242,7 +1247,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t\tLanguageConnectionContext lcc = tr.getLcc();" ] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -1266,6 +1271,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(getLanguageConnection());" ], "header": "@@ -1792,6 +1798,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(getLanguageConnection());" ], "header": "@@ -1828,6 +1835,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " InterruptStatus.", " restoreIntrFlagIfSeen(tr.getLcc());" ], "header": "@@ -1895,6 +1903,8 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen();" ], "header": "@@ -1913,6 +1923,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " LanguageConnectionContext lcc = getLanguageConnection();", " lcc.setReadOnly(readOnly);", " 
InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -2012,7 +2023,9 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t\t\tgetLanguageConnection().setReadOnly(readOnly);" ] }, { "added": [ " LanguageConnectionContext lcc = getLanguageConnection();", " lcc.setIsolationLevel(iLevel);", " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -2106,7 +2119,9 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t\t\tgetLanguageConnection().setIsolationLevel(iLevel);" ] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(getLanguageConnection());" ], "header": "@@ -2392,6 +2407,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(getLanguageConnection());" ], "header": "@@ -2424,6 +2440,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ "" ], "header": "@@ -2513,6 +2530,7 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " // Restore here, cf. comment in", " // EmbedDatabaseMetaData#getPreparedQuery:", " InterruptStatus.", " restoreIntrFlagIfSeen(getLanguageConnection());" ], "header": "@@ -2735,6 +2753,10 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [] }, { "added": [ " LanguageConnectionContext lcc = getLanguageConnection();", " lcc.resetFromPool();", " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -2839,7 +2861,9 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t\t\tgetLanguageConnection().resetFromPool();" ] }, { "added": [ " LanguageConnectionContext lcc = getLanguageConnection();", " (XATransactionController)lcc.getTransactionExecute();" ], "header": "@@ -2875,8 +2899,9 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t\t\t\t(XATransactionController) getLanguageConnection().getTransactionExecute();" ] }, { "added": [ " lcc.internalCommit(false /* don't commitStore again */);", " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -2891,8 +2916,9 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t\t\t\tgetLanguageConnection().internalCommit(false /* don't commitStore again */);" ] }, { "added": [ " LanguageConnectionContext lcc = getLanguageConnection();", " lcc.xaCommit(onePhase);", " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -2918,7 +2944,9 @@ public abstract class EmbedConnection implements EngineConnection", "removed": [ "\t\t \tgetLanguageConnection().xaCommit(onePhase);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedDatabaseMetaData.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -51,6 +51,7 @@ import java.sql.Types;", "removed": [] }, { "added": [ " LanguageConnectionContext lcc = getLanguageConnectionContext();", " lcc.getDataDictionary().checkVersion(", " InterruptStatus.restoreIntrFlagIfSeen();" ], "header": "@@ -2311,10 +2312,12 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [ "\t\t\t\tgetLanguageConnectionContext().getDataDictionary().checkVersion(" ] }, { "added": [ " InterruptStatus.", " restoreIntrFlagIfSeen(getLanguageConnectionContext());" ], "header": "@@ -3497,6 +3500,8 @@ public class EmbedDatabaseMetaData extends ConnectionChild", "removed": [] } ] }, { "file": 
"java/engine/org/apache/derby/impl/jdbc/EmbedPreparedStatement.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -66,6 +66,7 @@ import org.apache.derby.iapi.jdbc.EngineParameterMetaData;", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(lcc);", " } catch (Throwable t) {", " throw handleException(t);", " }", "" ], "header": "@@ -144,9 +145,11 @@ public abstract class EmbedPreparedStatement", "removed": [ "\t\t\t} catch (Throwable t) {", "\t\t throw handleException(t);", "\t\t\t}" ] }, { "added": [ "", " InterruptStatus.restoreIntrFlagIfSeen();" ], "header": "@@ -251,6 +254,8 @@ public abstract class EmbedPreparedStatement", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedResultSet.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -80,6 +80,7 @@ import org.apache.derby.iapi.jdbc.CharacterStreamDescriptor;", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -459,7 +460,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\t" ] }, { "added": [ " LanguageConnectionContext lcc =", " getEmbedConnection().getLanguageConnection();", "" ], "header": "@@ -571,6 +572,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "", " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -579,7 +583,8 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\t " ] }, { "added": [], "header": "@@ -597,7 +602,6 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\t\tLanguageConnectionContext lcc = getEmbedConnection().getLanguageConnection();" ] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -3657,6 +3661,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -3757,6 +3762,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -3834,6 +3840,7 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(", " getEmbedConnection().getLanguageConnection());" ], "header": "@@ -3940,6 +3947,8 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ "", " InterruptStatus.restoreIntrFlagIfSeen();", "" ], "header": "@@ -3973,6 +3982,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [] }, { "added": [ " EmbedConnection ec = getEmbedConnection();", " if (wasNull = dvd.isNull()) {", " InterruptStatus.restoreIntrFlagIfSeen();", " }" ], "header": "@@ -4009,9 +4021,12 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\tif (wasNull = dvd.isNull())" ] }, { "added": [ " EmbedBlob result = new EmbedBlob(dvd, ec);", " restoreIntrFlagIfSeen(pushStack, ec);", " return result;" ], "header": "@@ -4022,7 +4037,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\treturn new EmbedBlob(dvd, getEmbedConnection());" ] }, { "added": [ " EmbedConnection ec = getEmbedConnection();", " LanguageConnectionContext lcc = ec.getLanguageConnection();", " if (wasNull = dvd.isNull()) {", " InterruptStatus.restoreIntrFlagIfSeen();", " }" ], "header": "@@ 
-4060,12 +4077,16 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\tif (wasNull = dvd.isNull())" ] }, { "added": [ " EmbedClob result = new EmbedClob(ec, dvd);", " restoreIntrFlagIfSeen(pushStack, ec);", " return result;" ], "header": "@@ -4074,7 +4095,9 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ " return new EmbedClob(getEmbedConnection(), dvd);" ] }, { "added": [ "", " LanguageConnectionContext lcc =", " getEmbedConnection().getLanguageConnection();", "", " try {" ], "header": "@@ -4511,7 +4534,11 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\ttry {" ] }, { "added": [], "header": "@@ -4519,8 +4546,6 @@ public abstract class EmbedResultSet extends ConnectionChild", "removed": [ "\t\t\t\t\tLanguageConnectionContext lcc = getEmbedConnection()", "\t\t\t\t\t\t\t.getLanguageConnection();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/EmbedStatement.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -37,6 +37,7 @@ import java.sql.SQLException;", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -610,6 +611,7 @@ public class EmbedStatement extends ConnectionChild", "removed": [] }, { "added": [ " // If we saw an interrupt, stop execution of batch now.", " // throwIf will likely only throw after at least one stm", " // has been executed, since first time around we probably", " // didn't do anything to notice interrupts yet.", " InterruptStatus.throwIf(lcc);", "", " InterruptStatus.restoreIntrFlagIfSeen(lcc);" ], "header": "@@ -971,10 +973,17 @@ public class EmbedStatement extends ConnectionChild", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/jdbc/TransactionResourceImpl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -39,6 +39,7 @@ import org.apache.derby.iapi.reference.SQLState;", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen();" ], "header": "@@ -304,6 +305,7 @@ public final class TransactionResourceImpl", "removed": [] }, { "added": [ " InterruptStatus.restoreIntrFlagIfSeen();" ], "header": "@@ -341,7 +343,7 @@ public final class TransactionResourceImpl", "removed": [ "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/conn/GenericLanguageConnectionContext.java", "hunks": [ { "added": [ "", " /**", " * Interrupt status flag of this session's thread, in the form of an", " * exception created where an interrupt was (last) detected during operation,", " * null if no interrupt has been seen.", " */", " private StandardException interruptedException;" ], "header": "@@ -295,6 +295,13 @@ public class GenericLanguageConnectionContext", "removed": [] }, { "added": [ " interruptedException = null;" ], "header": "@@ -382,6 +389,7 @@ public class GenericLanguageConnectionContext", "removed": [] }, { "added": [ " public void setInterruptedException(StandardException e) {", " interruptedException = e;", " }", "", " public StandardException getInterruptedException() {", " return interruptedException;", " }" ], "header": "@@ -3860,4 +3868,11 @@ public class GenericLanguageConnectionContext", "removed": [] } ] } ]
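The intended usage pattern of the new helper, sketched around a placeholder wait; the method names setInterrupted and restoreIntrFlagIfSeen are the ones introduced by this work, while the surrounding code is illustrative only:

    import org.apache.derby.iapi.util.InterruptStatus;

    public class InterruptPatternSketch {
        static void waitSafely(Object latch) {
            synchronized (latch) {
                try {
                    latch.wait(1000L);
                } catch (InterruptedException ie) {
                    // Deep inside the engine: note the interrupt and carry on
                    // instead of aborting I/O or lock waits halfway through.
                    InterruptStatus.setInterrupted();
                }
            }
        }

        static void apiMethodEpilogue() {
            // Just before an API method returns to the application: re-assert
            // the thread's interrupt flag if one was noted during the call.
            InterruptStatus.restoreIntrFlagIfSeen();
        }
    }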
derby-DERBY-4741-3abf75fd
DERBY-4741 Make embedded Derby work reliably in the presence of thread interrupts Patch derby-4741-testQueryInterrupt. * adds a new test case: InterruptResilienceTest#testLongQueryInterrupt which tests that a query will check for the interrupt flag and throw 08000 (CONN_INTERRUPT) at the same time it checks for query time-out. * adds a missing piece of code in InterruptStatus#throwIf * I also adjusted an existing test (for RAF recovery) to handle the case that we could see 08000 (CONN_INTERRUPT) when performing a query as part of that test, depending on when the interrupt happens. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1066701 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/util/InterruptStatus.java", "hunks": [ { "added": [ " * flag to allow safe operation during execution, or if the interrupt", " * status flag is set now. Called when operations want to be prematurely", " * terminated due to interrupt.", " * If an interrupt status flag was seen, but temporarily switched off, we", " * set it back ON here.", " * @throws StandardException (session level SQLState.CONN_INTERRUPT) if", " * interrupt seen", "", " if (Thread.currentThread().isInterrupted()) {", " setInterrupted();", " }", "" ], "header": "@@ -238,17 +238,24 @@ public class InterruptStatus {", "removed": [ " * flag to allow safe operation during execution. Called when operations", " * will be be prematurely terminated due to the interrupt.", " * If an interrupt status flag was seen, we set it back ON here and throw", " * session level SQLState.CONN_INTERRUPT.", " * @throws StandardException (SQLState.CONN_INTERRUPT)" ] } ] } ]
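A minimal sketch of the check-and-throw pattern the commit above describes, outside Derby: the interrupt flag is examined at the same point where query time-out is examined. This is not Derby's InterruptStatus/throwIf; the class name and the timeout message are illustrative, and 08000 is the SQLSTATE quoted in the commit message.

    import java.sql.SQLException;

    // Hedged sketch: periodic cancellation check combining interrupt status
    // and query time-out, as a query execution loop might call it.
    final class CancellationCheck {
        private final long deadlineMillis;

        CancellationCheck(long timeoutMillis) {
            this.deadlineMillis = (timeoutMillis > 0)
                    ? System.currentTimeMillis() + timeoutMillis
                    : Long.MAX_VALUE;
        }

        // Called periodically from the row-fetching loop.
        void check() throws SQLException {
            if (Thread.currentThread().isInterrupted()) {
                // isInterrupted() does not clear the flag, so the caller can
                // still observe it after this throws.
                throw new SQLException("connection interrupted", "08000");
            }
            if (System.currentTimeMillis() > deadlineMillis) {
                throw new SQLException("query timed out");
            }
        }
    }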
derby-DERBY-4741-482ff80f
DERBY-4967 Handle interrupt received while waiting for database lock Patch derby-4967-locking-4 which makes the existing test LockInterruptTest assert that the interrupt flag is set when we see 08000 (CONN_INTERRUPT) - in accordance with the behavior we expect after DERBY-4741. The assert is skipped on Solaris/Sun Java <= 1.6 unless the flag -XX:-UseVMInterruptibleIO is used. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1060832 13f79535-47bb-0310-9956-ffa450edef68
[]
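A hedged sketch of the kind of assertion the commit above adds to LockInterruptTest, written against plain JDBC and JUnit 4 asserts rather than Derby's test harness. It assumes table T already exists and that another transaction holds a conflicting lock, so the UPDATE has to wait.

    import static org.junit.Assert.*;

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;

    // Hedged sketch, not the real LockInterruptTest.
    public class InterruptFlagAssertionSketch {
        public static void checkFlagSurvivesConnInterrupt(Connection c) throws Exception {
            Thread.currentThread().interrupt();            // pending interrupt before the wait
            Statement s = c.createStatement();
            try {
                s.executeUpdate("UPDATE T SET X = X + 1");  // blocks on the lock, then fails
                fail("expected 08000 (CONN_INTERRUPT)");
            } catch (SQLException e) {
                assertEquals("08000", e.getSQLState());
                // The behavior DERBY-4741 promises: the flag is set again before throwing.
                assertTrue(Thread.currentThread().isInterrupted());
            } finally {
                Thread.interrupted();                       // clear it so later code is unaffected
                s.close();
            }
        }
    }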
derby-DERBY-4741-5d974225
DERBY-4741 Make Derby work reliably in the presence of thread interrupts Patch derby-4741-sleeps-waits-3, which modifies all actions when seeing an interrupt in Object#wait and Thread#sleep inside "org.apache.derby.impl.store.*" as well as in one other instance. The new code just makes a note that an interrupt occurred and then retries. The old code sometimes threw 08000 (CONN_INTERRUPT) or just ignored the interrupts. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1061516 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/GenericStatement.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -42,6 +42,7 @@ import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -71,6 +71,7 @@ import org.apache.derby.iapi.reference.Attribute;", "removed": [] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -2350,13 +2351,7 @@ public class BaseDataFileFactory", "removed": [ "\t\t\t\t\t\t// make sure we are not stuck in frozen state if we", "\t\t\t\t\t\t// caught an interrupt exception and the calling ", " // thread may not have a chance to call unfreeze", "\t\t\t\t\t\tisFrozen = false;", "\t\t\t\t\t\tfreezeSemaphore.notifyAll();", "", "\t\t\t\t\t\tthrow StandardException.interrupt(ie);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BasePage.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;", "" ], "header": "@@ -45,6 +45,8 @@ import org.apache.derby.iapi.store.raw.log.LogInstant;", "removed": [] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -1683,7 +1685,7 @@ abstract class BasePage implements Page, Observer, TypedFormat", "removed": [ "\t\t\t\t\tthrow StandardException.interrupt(ie);" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -1712,6 +1714,7 @@ abstract class BasePage implements Page, Observer, TypedFormat", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/CachedPage.java", "hunks": [ { "added": [ "", "import org.apache.derby.iapi.util.InterruptStatus;", "" ], "header": "@@ -37,6 +37,9 @@ import org.apache.derby.iapi.services.io.FormatIdUtil;", "removed": [] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -563,7 +566,7 @@ public abstract class CachedPage extends BasePage implements Cacheable", "removed": [ "\t\t\t\t\tthrow StandardException.interrupt(ie);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer4.java", "hunks": [ { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -314,10 +314,7 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " InterruptStatus.noteAndClearInterrupt(", " \"interrupt while waiting to gain entry\",", " threadsInPageIO,", " hashCode());" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -517,10 +514,7 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " InterruptStatus.noteAndClearInterrupt(", " \"interrupt while waiting to gain entry\",", " threadsInPageIO,", " hashCode());" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/log/LogAccessFile.java", "hunks": [ { "added": [], "header": "@@ -31,7 +31,6 @@ import org.apache.derby.io.StorageRandomAccessFile;", "removed": [ "import java.io.InterruptedIOException;" ] }, { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -39,6 +38,7 @@ import org.apache.derby.iapi.services.io.ArrayOutputStream;", "removed": [] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -513,11 +513,7 @@ public class LogAccessFile", "removed": [ "\t\t\t\t\t\t//do nothing, let the flush request to complete.", "\t\t\t\t\t\t//because it possible that other thread which is", "\t\t\t\t\t\t//currently might have completed this request also ,", 
"\t\t\t\t\t\t//if exited on interrupt and throw exception, can not", "\t\t\t\t\t\t//be sure whether this transaction is COMMITTED ot not." ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/log/LogToFile.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;", "import org.apache.derby.iapi.util.InterruptDetectedException;", "" ], "header": "@@ -91,6 +91,9 @@ import org.apache.derby.io.WritableStorageFactory;", "removed": [] }, { "added": [ "import java.io.InterruptedIOException;" ], "header": "@@ -99,6 +102,7 @@ import java.io.DataOutputStream;", "removed": [] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -715,7 +719,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ " // do nothing" ] }, { "added": [ " private boolean checkpointWithTran(" ], "header": "@@ -1522,7 +1526,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\tprotected boolean checkpointWithTran(" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -1601,7 +1605,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ " throw StandardException.interrupt(ie);" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -2023,7 +2027,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\t\t\t\tthrow StandardException.interrupt(ie);" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -2985,7 +2989,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ " // do nothing" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -3946,7 +3950,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\t\t\t\t\t\tthrow StandardException.interrupt(ie);" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -4008,7 +4012,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ "\t\t\t\t\t\t\tthrow StandardException.interrupt(ie);" ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -4210,7 +4214,7 @@ public final class LogToFile implements LogFactory, ModuleControl, ModuleSupport", "removed": [ " //does not matter weather I get interrupted or not" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/xact/XactFactory.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -66,6 +66,7 @@ import org.apache.derby.iapi.types.DataValueFactory;", "removed": [] }, { "added": [], "header": "@@ -1068,11 +1069,8 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup", "removed": [ " * @exception StandardException if interrupted while waiting for a ", " * backup to complete.", " throws StandardException " ] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -1084,7 +1082,7 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup", "removed": [ " throw StandardException.interrupt(ie);" ] }, { "added": [ " * @exception RuntimeException if runtime exception occurs, in which case", " * other threads blocked on backupSemaphore are notified" ], "header": "@@ -1133,10 +1131,10 @@ public class XactFactory implements TransactionFactory, ModuleControl, ModuleSup", "removed": [ "\t * @exception StandardException if interrupted or a runtime 
exception occurs", "\t\tthrows StandardException " ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/replication/master/AsynchronousLogShipper.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -34,6 +34,7 @@ import org.apache.derby.impl.store.replication.ReplicationLogger;", "removed": [] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -221,8 +222,7 @@ public class AsynchronousLogShipper extends Thread implements", "removed": [ " //Interrupt the log shipping thread.", " return;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/replication/net/ReplicationMessageReceive.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -34,6 +34,7 @@ import org.apache.derby.iapi.reference.MessageId;", "removed": [] }, { "added": [ " /**", " * Whether or not the ping thread has been notified to check connection.", " * Protected by sendPingSemaphore.", " */", " private boolean doSendPing = false;", "" ], "header": "@@ -82,6 +83,12 @@ public class ReplicationMessageReceive {", "removed": [] }, { "added": [ "", "", " long startWaitingatTime;", " long giveupWaitingAtTime;", " long nextWait = DEFAULT_PING_TIMEOUT;", "", " doSendPing = true;", "", " // want result within DEFAULT_PING_TIMEOUT millis.", " startWaitingatTime = System.currentTimeMillis();", " giveupWaitingAtTime = startWaitingatTime + DEFAULT_PING_TIMEOUT;", " while (true) {", " try {", " // Wait for the pong response message", " receivePongSemaphore.wait(nextWait);", " } catch (InterruptedException ex) {", " InterruptStatus.setInterrupted();", " }", "", " nextWait = giveupWaitingAtTime - System.currentTimeMillis();", "", " if (!connectionConfirmed && nextWait > 0) {", " // we could have been interrupted or seen a spurious", " // wakeup, so wait a bit longer", " continue;", " }", " break;" ], "header": "@@ -464,17 +471,40 @@ public class ReplicationMessageReceive {", "removed": [ " try {", " // Wait for the pong response message", " receivePongSemaphore.wait(DEFAULT_PING_TIMEOUT);", " } catch (InterruptedException ex) {" ] }, { "added": [ " while (!doSendPing) {", " try {", " sendPingSemaphore.wait();", " } catch (InterruptedException e) {", " InterruptStatus.setInterrupted();", " }", " }", "", " doSendPing = false;", "" ], "header": "@@ -500,8 +530,17 @@ public class ReplicationMessageReceive {", "removed": [ " sendPingSemaphore.wait();" ] }, { "added": [], "header": "@@ -509,7 +548,6 @@ public class ReplicationMessageReceive {", "removed": [ " } catch (InterruptedException ie) {" ] } ] } ]
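The "note the interrupt and retry" idiom applied throughout the hunks above, sketched outside Derby. The class and field names are illustrative stand-ins; in Derby the note is taken with InterruptStatus.setInterrupted() and the flag is surfaced later at a safe point.

    // Hedged sketch: don't throw on InterruptedException inside a wait loop,
    // just remember it happened and keep waiting for the real condition.
    public class NoteAndRetrySketch {
        private final Object monitor = new Object();
        private boolean ready = false;                 // the condition we are waiting for
        private volatile boolean sawInterrupt = false; // stand-in for InterruptStatus

        public void awaitReady() {
            synchronized (monitor) {
                while (!ready) {
                    try {
                        monitor.wait();
                    } catch (InterruptedException ie) {
                        // Note it for later (the session re-sets the flag or
                        // throws CONN_INTERRUPT at a safe point), then retry.
                        sawInterrupt = true;
                    }
                }
            }
            if (sawInterrupt) {
                Thread.currentThread().interrupt();    // restore the flag for the caller
            }
        }

        public void signalReady() {
            synchronized (monitor) {
                ready = true;
                monitor.notifyAll();
            }
        }
    }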
derby-DERBY-4741-616c0d01
DERBY-4741 Make Derby work reliably in the presence of thread interrupts Patch derby-4741-raf-stresstest-4. This patch adds a new test fixture to InterruptResilienceTest: a multi-threaded read/write test under an interrupt shower. This primarily exercises the random access file recovery (RAFContainer4#recoverContainerAfterInterrupt), but since the interrupt can arrive at any time during query execution, higher levels of the embedded code are also exposed (jdbc, language, store). The new test case is InterruptResilienceTest#testRAFReadWriteMultipleThreads. I also found I had to add Class.forName(<driver>) to DriverManagerConnector#getConnectionByAttributes and a new public method BasicJDBCTestCase#openDefaultConnection(TestConfiguration). The latter makes it possible to use the main thread's test configuration in the server threads (when running the test in client-server mode), cf. "thisConfig" member in InterruptResilienceTest. The test now runs in its own database, with derby.system.durability=test for speed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1064174 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer4.java", "hunks": [ { "added": [ " // Interrupt recovery \"stealthMode\": If this thread holds a monitor on", " //", " // a) \"this\" (when RAFContainer#clean calls getEmbryonicPage via", " // writeRAFHEader) or", " // b) \"allocCache\" (e.g. FileContainer#newPage,", " // #pageValid)", " //", " // we cannot grab channelCleanupMonitor lest another thread is one", " // doing recovery, since the recovery thread will try to grab both", " // those monitors during container resurrection. So, just forge ahead", " // in stealth mode (i.e. the recovery thread doesn't see us). If we see", " // retry from RAFContainer releasing \"this\", or FileContainer", " // (releasing allocCache) as the case may be, so the recovery thread", " // can do its thing." ], "header": "@@ -266,16 +266,21 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " // Interrupt recovery: If this thread holds a monitor on \"this\" (when", " // RAFContainer#clean calls getEmbryonicPage via writeRAFHEader) or", " // \"allocCache\" (e.g. FileContainer#newPage, #pageValid) we cannot grab", " // channelCleanupMonitor lest another thread is one doing recovery,", " // since the recovery thread will try to grab both those monitors", " // during container resurrection. So, just forge ahead in stealth mode", " // (i.e. the recovery thread doesn't see us). If we see", " // retry from RAFContainer (\"this\") or FileContainer (\"allocCache\")", " // after having released the relevant monitor." ] } ] }, { "file": "java/testing/org/apache/derbyTesting/junit/TestConfiguration.java", "hunks": [ { "added": [ " public Connection openDefaultConnection()" ], "header": "@@ -1533,7 +1533,7 @@ public final class TestConfiguration {", "removed": [ " Connection openDefaultConnection()" ] } ] } ]
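A hedged sketch of an "interrupt shower" harness like the one the commit above describes: worker threads hammer the database while the driver thread interrupts them repeatedly, and only 08000 (CONN_INTERRUPT) is tolerated as an outcome. The table T, the newConnection factory, and all class names are illustrative assumptions, not Derby test APIs.

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.util.concurrent.Callable;

    // Hedged sketch of a multi-threaded read/write test under interrupts.
    public class InterruptShowerSketch {
        public static void run(final Callable<Connection> newConnection,
                               int workers, final int rounds) throws Exception {
            Thread[] threads = new Thread[workers];
            for (int i = 0; i < workers; i++) {
                threads[i] = new Thread(new Runnable() {
                    public void run() {
                        try {
                            Connection c = newConnection.call();
                            PreparedStatement ps =
                                c.prepareStatement("INSERT INTO T(X) VALUES (?)");
                            for (int r = 0; r < rounds; r++) {
                                ps.setInt(1, r);
                                try {
                                    ps.executeUpdate();
                                } catch (SQLException e) {
                                    if (!"08000".equals(e.getSQLState())) {
                                        throw e;   // only CONN_INTERRUPT is acceptable
                                    }
                                    return;        // session is gone; stop this worker
                                }
                            }
                            c.close();
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                });
                threads[i].start();
            }
            for (int r = 0; r < rounds; r++) {      // the "shower"
                for (Thread t : threads) { t.interrupt(); }
                Thread.sleep(1);
            }
            for (Thread t : threads) { t.join(); }
        }
    }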
derby-DERBY-4741-794fc707
DERBY-4741 Make Derby work reliably in the presence of thread interrupts Added missing copyright header to the newly introduced class InterruptStatus. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1030814 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/util/InterruptStatus.java", "hunks": [ { "added": [ "/*", "", " Derby - Class org.apache.derby.iapi.util.InterruptStatus", "", " Licensed to the Apache Software Foundation (ASF) under one or more", " contributor license agreements. See the NOTICE file distributed with", " this work for additional information regarding copyright ownership.", " The ASF licenses this file to you under the Apache License, Version 2.0", " (the \"License\"); you may not use this file except in compliance with", " the License. You may obtain a copy of the License at", "", " http://www.apache.org/licenses/LICENSE-2.0", "", " Unless required by applicable law or agreed to in writing, software", " distributed under the License is distributed on an \"AS IS\" BASIS,", " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", " See the License for the specific language governing permissions and", " limitations under the License.", "", " */", "" ], "header": "@@ -1,3 +1,24 @@", "removed": [] } ] } ]
derby-DERBY-4741-b231544c
DERBY-4741 Make Derby work reliably in the presence of thread interrupts Patch derby-4741-kristians-01, incorporating Kristian's comments. Should not change behavior. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1043802 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer4.java", "hunks": [ { "added": [ " private final Object channelCleanupMonitor = new Object();" ], "header": "@@ -79,7 +79,7 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " private Object channelCleanupMonitor = new Object();" ] }, { "added": [ " /**", " * Use when seeing an exception during IO and when another thread is", " * presumably doing the recovery.", " * <p/>", " * If {@code stealthMode == false}, wait for another thread to recover the", " * container after an interrupt. If {@code stealthMode == true}, throw", " * internal exception {@code InterruptDetectedException} to do retry from", " * higher in the stack.", " * <p/>", " * If {@code stealthMode == false}, maximum wait time for the container to", " * become available again is determined by the product {@code", " * FileContainer#MAX_INTERRUPT_RETRIES * FileContainer#INTERRUPT_RETRY_SLEEP}.", " * There is a chance this thread will not see any recovery occuring (yet),", " * in which case it waits for a bit and just returns, so the caller must", " * retry IO until success.", " * <p/>", " * If for some reason the recovering thread has given up on resurrecting", " * the container, cf {@code #giveUpIO}, the method throws {@code", " * FILE_IO_INTERRUPTED}.", " * ", " * @param e the exception we saw during IO", " * @param stealthMode true if the thread doing IO in stealth mode", "", " * @throws StandardException {@code InterruptDetectedException} and normal", " * error policy", " */" ], "header": "@@ -603,6 +603,32 @@ class RAFContainer4 extends RAFContainer {", "removed": [] }, { "added": [ " // Max, give up, probably way too long anyway," ], "header": "@@ -662,7 +688,7 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " // Max 60s, then give up, probably way too long anyway," ] }, { "added": [ " * Use this when the thread has received a AsynchronousCloseException", " * exception during IO and its interruped flag is also set. This makes this", " * thread a likely candicate to do container recovery (aka resurrection),", " * unless another thread started it already, cf. return value.", " *", " * @return true if we did recovery, false if we saw someone else do it and" ], "header": "@@ -713,9 +739,14 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " * @return true if we did it, false if we saw someone else do it and" ] }, { "added": [ " SanityManager.ASSERT(Thread.holdsLock(this));", " SanityManager.ASSERT(!Thread.holdsLock(this));" ], "header": "@@ -881,9 +912,9 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " SanityManager.ASSERT(Thread.currentThread().holdsLock(this));", " SanityManager.ASSERT(!Thread.currentThread().holdsLock(this));" ] }, { "added": [], "header": "@@ -1065,9 +1096,6 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " boolean beforeOpen = srcChannel.isOpen();", " boolean beforeInterrupted = Thread.currentThread().isInterrupted();", "" ] }, { "added": [], "header": "@@ -1105,9 +1133,6 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " boolean beforeOpen = dstChannel.isOpen();", " boolean beforeInterrupted = Thread.currentThread().isInterrupted();", "" ] } ] } ]
derby-DERBY-4741-bd018fd9
DERBY-4741 Make Derby work reliably in the presence of thread interrupts Patch derby-4741-c-01-nio: closes two corner cases I have observed when stress testing the RAFContainer4 recovery mechanism. It also does some other small cleanups. Regressions ran OK. RAFContainer: If we receive an interrupt when the container is first being opened (i.e. during RAFContainer.run (OPEN_CONTAINER_ACTION) -> getEmbryonicPage), recovery will fail because currentIdentity, needed in RAFContainer4#recoverContainerAfterInterrupt, hasn't yet been set. RAFContainer4: If a stealthMode read is interrupted and is recovering the container, it erroneously increments threadsInPageIO just before exiting to retry IO. This leads to a break in the invariant that threadsInPageIO be 0 when all threads are done, causing issues (hangs) down the line. If, when we are reopening the container, the read being done during that operation (getEmbryonicPage) is interrupted, that stealth mode read will also lead to a (recursive) recovery. We have to catch this case by adding a "catch (InterruptDetectedException e)" just after the call to openContainer, not by testing the interrupt flag as presently done, since the recovery inside the recursive call to getEmbryonicPage/readPage will already have cleared the flag and done recovery. When giving up reopening the container for some reason, we also forgot to decrement threadsInPageIO. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1040086 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer.java", "hunks": [ { "added": [ " protected ContainerKey idAPriori = null;", "", " synchronized boolean openContainer(ContainerKey newIdentity)", " boolean success = false;", " idAPriori = currentIdentity;", "", " currentIdentity = newIdentity;", " // NIO: We need to set currentIdentity before we try to open, in", " // case we need its value to perform a recovery in the case of an", " // interrupt during readEmbryonicPage as part of", " // OPEN_CONTAINER_ACTION. Note that this gives a recursive call to", " // openContainer.", " //", " // If we don't succeed in opening, we reset currentIdentity to its", " // a priori value.", " success = AccessController.doPrivileged(this) != null;", " idAPriori = currentIdentity;" ], "header": "@@ -898,18 +898,30 @@ class RAFContainer extends FileContainer implements PrivilegedExceptionAction", "removed": [ "\tsynchronized boolean openContainer(ContainerKey newIdentity)", " boolean success = AccessController.doPrivileged(this) != null;", " if (success) {", " currentIdentity = newIdentity;", " }" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/RAFContainer4.java", "hunks": [ { "added": [ " // Pave way for the thread that received the interrupt that caused", " // the channel close to clean up, by signaling we are waiting (no", " // longer doing IO):", "" ], "header": "@@ -637,6 +637,10 @@ class RAFContainer4 extends RAFContainer {", "removed": [] }, { "added": [ " // Since the channel is presumably ok (lest giveUpIO is set,", " // see below), we put ourselveds back in the IO set of threads:", "" ], "header": "@@ -671,6 +675,9 @@ class RAFContainer4 extends RAFContainer {", "removed": [] }, { "added": [ " threadsInPageIO--;" ], "header": "@@ -685,6 +692,7 @@ class RAFContainer4 extends RAFContainer {", "removed": [] }, { "added": [ " // 1) Another interrupted thread got to do the cleanup before us, so" ], "header": "@@ -715,7 +723,7 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " // Another interrupted thread got to do the cleanup before us, so" ] }, { "added": [ " //", " // 2) The other way to end up here is if we get interrupted during", " // getEmbryonicPage called during container recovery from the same", " // thread (restoreChannelInProgress is set then, and", " // getEmbryonicPage is stealthMode)" ], "header": "@@ -734,6 +742,11 @@ class RAFContainer4 extends RAFContainer {", "removed": [] }, { "added": [ " } catch (InterruptDetectedException e) {", " debugTrace(\"interrupted during recovery's \" +", " \"readEmbryonicPage\");", " continue;", " } catch (Exception newE) {", " // Something else failed - shutdown happening?", " synchronized(giveUpIOm) {", " // Make sure other threads will give up and", " // throw, too.", " giveUpIO = true;", "", " if (SanityManager.DEBUG) {", " debugTrace(", " \"can't resurrect container: \" +", " newE);" ], "header": "@@ -808,26 +821,22 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " } catch (Exception newE) {", "", " if (InterruptStatus.noteAndClearInterrupt(", " \"RAF: isInterrupted during recovery\",", " threadsInPageIO,", " hashCode())) {", " continue;", " } else {", " // Something else failed - shutdown happening?", " synchronized(giveUpIOm) {", " // Make sure other threads will give up and", " // throw, too.", " giveUpIO = true;", "", " if (SanityManager.DEBUG) {", " debugTrace(", " \"can't resurrect container: \" +", " newE);", " }" ] }, { "added": [ " if (stealthMode) {", " // don't touch 
threadsInPageIO", " } else {", " threadsInPageIO++;", " }", "" ], "header": "@@ -838,7 +847,12 @@ class RAFContainer4 extends RAFContainer {", "removed": [ " threadsInPageIO++;" ] } ] } ]
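A hedged sketch of the retry shape described in the commit above: an internal "interrupt detected" signal makes the caller simply retry the whole I/O operation once recovery has run, instead of inspecting the interrupt flag. All names here are illustrative; Derby's real InterruptDetectedException is a StandardException subclass, not a RuntimeException.

    // Hedged sketch of "catch the internal interrupt signal and retry".
    public class RetryOnInterruptSketch {

        static class InterruptDetected extends RuntimeException { }

        interface ContainerOp<T> {
            T run() throws InterruptDetected;
        }

        static <T> T runWithRetry(ContainerOp<T> op, int maxRetries) {
            for (int attempt = 0; ; attempt++) {
                try {
                    return op.run();
                } catch (InterruptDetected e) {
                    // Another thread (or a recursive call) recovered the
                    // container; our only job is to try the I/O again.
                    if (attempt >= maxRetries) {
                        throw new IllegalStateException("container never became usable");
                    }
                }
            }
        }
    }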
derby-DERBY-4741-c9528432
DERBY-4769 Handle interrupt received while waiting for database lock (subtask of DERBY-4741): Patch derby-4967-locking-1 which lets Derby throw CONN_INTERRUPT if an interrupt is received while waiting for a database lock. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1058245 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/locks/LockSet.java", "hunks": [ { "added": [ " // If we were not woken by another then we have timed", " // out. Either deadlock out or timeout. Or thread has", " // been interrupted." ], "header": "@@ -346,8 +346,9 @@ forever:\tfor (;;) {", "removed": [ " // If we were not woken by another then we have", " // timed out. Either deadlock out or timeout" ] }, { "added": [ " ", " // ending wait because of lock timeout or interrupt", " if (wakeupReason == Constants.WAITING_LOCK_INTERRUPTED) {", " Thread.currentThread().interrupt();", " throw StandardException.newException(SQLState.CONN_INTERRUPT);", "", " } else if (deadlockTrace)" ], "header": "@@ -405,11 +406,16 @@ forever:\tfor (;;) {", "removed": [ " // ending wait because of lock timeout.", " if (deadlockTrace)" ] } ] } ]
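A sketch of the lock-wait change above in plain Java, without Derby's LockSet internals: when the wait ends because the waiter was interrupted, put the interrupt flag back and report the session-level error 08000. The class name and the timeout SQLSTATE used in the sketch are illustrative.

    import java.sql.SQLException;

    // Hedged sketch: interrupt during a lock wait surfaces as 08000,
    // with the interrupt flag restored for the caller.
    public class LockWaitSketch {
        private final Object lockMonitor = new Object();
        private boolean granted = false;

        public void waitForLock(long timeoutMillis) throws SQLException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            synchronized (lockMonitor) {
                while (!granted) {
                    long remaining = deadline - System.currentTimeMillis();
                    if (remaining <= 0) {
                        throw new SQLException("lock timeout");        // timeout path
                    }
                    try {
                        lockMonitor.wait(remaining);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();            // keep the flag set
                        throw new SQLException("interrupted while waiting for lock",
                                               "08000");               // CONN_INTERRUPT
                    }
                }
            }
        }
    }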
derby-DERBY-4741-ec9d167b
DERBY-4741 Make embedded Derby work reliably in the presence of thread interrupts Patch derby-4741-sleeps-waits-more, which "regularizes" a few more instances of interrupt handling to follow the idiom established in this issue's patches. This leaves a few instances in BasicDaemon.java (as far as embedded code is concerned), which will need more consideration. In any case, interrupting the daemon threads is less of a valid use case, I believe, i.e. Derby's ability to tolerate that is less crucial than tolerating interrupts to the user's connection threads. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1073595 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/db/SlaveDatabase.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -30,6 +30,7 @@ import org.apache.derby.iapi.jdbc.AuthenticationService;", "removed": [] }, { "added": [ " InterruptStatus.setInterrupted();" ], "header": "@@ -271,7 +272,7 @@ public class SlaveDatabase extends BasicDatabase {", "removed": [ " // do nothing" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/daemon/IndexStatisticsDaemonImpl.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -51,6 +51,7 @@ import org.apache.derby.iapi.store.access.GroupFetchScanController;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/services/monitor/TopService.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -28,6 +28,7 @@ import org.apache.derby.iapi.services.monitor.PersistentService;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/store/replication/net/ReplicationMessageTransmit.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.util.InterruptStatus;" ], "header": "@@ -30,6 +30,7 @@ import java.security.PrivilegedExceptionAction;", "removed": [] }, { "added": [ " long startMillis = System.currentTimeMillis();", " long waited = 0L;", "", " while (receivedMsg == null &&", " waited < DEFAULT_MESSAGE_RESPONSE_TIMEOUT) {", "", " synchronized (receiveSemaphore) {", " try {", " receiveSemaphore.wait(", " DEFAULT_MESSAGE_RESPONSE_TIMEOUT - waited);", " } catch (InterruptedException ie) {", " InterruptStatus.setInterrupted();", " waited = System.currentTimeMillis() - startMillis;", " continue;", " }", " break;", "" ], "header": "@@ -202,12 +203,25 @@ public class ReplicationMessageTransmit {", "removed": [ " synchronized (receiveSemaphore) {", " try {", " receiveSemaphore.wait(DEFAULT_MESSAGE_RESPONSE_TIMEOUT);", " } catch (InterruptedException ie) {" ] } ] } ]
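A hedged sketch of the bounded-wait idiom visible in the replication hunks above: on an InterruptedException or a spurious wakeup, recompute how much of the overall timeout remains and wait again, rather than giving up early or looping forever. The class and field names are illustrative.

    // Hedged sketch: wait up to a total timeout, tolerating interrupts.
    public class BoundedWaitSketch {
        private final Object semaphore = new Object();
        private Object receivedMsg = null;

        public Object awaitReply(long timeoutMillis) {
            long start = System.currentTimeMillis();
            boolean sawInterrupt = false;
            Object result;
            synchronized (semaphore) {
                long waited = 0L;
                while (receivedMsg == null && waited < timeoutMillis) {
                    try {
                        semaphore.wait(timeoutMillis - waited);
                    } catch (InterruptedException ie) {
                        sawInterrupt = true;          // note it, don't abort the protocol
                    }
                    waited = System.currentTimeMillis() - start;
                }
                result = receivedMsg;                 // may still be null on timeout
            }
            if (sawInterrupt) {
                Thread.currentThread().interrupt();   // restore the flag for the caller
            }
            return result;
        }

        public void deliver(Object msg) {
            synchronized (semaphore) {
                receivedMsg = msg;
                semaphore.notifyAll();
            }
        }
    }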
derby-DERBY-4743-6819a865
DERBY-4743 Attempt to access a DECLARE GLOBAL TEMPORARY TABLE after commit in an XA transaction in Network server fails. The fix for DERBY-4731 made the behavior consistent across embedded and network server. Derby does not support accessing global temporary tables declared in an XA transaction subsequent to the commit of the transaction. This change updates the JUnit XATest to verify that accessing the table after the commit fails as expected in both the embedded and network server case. In both cases you will get the generic 'table does not exist' error. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@966027 13f79535-47bb-0310-9956-ffa450edef68
[]
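A hedged, standalone sketch of the behavior described above, against Derby's embedded XA data source: a temporary table is declared inside a global transaction, and accessing it after the XA commit is expected to fail with the generic table-not-exists error. The database name, table name, and trivial Xid implementation are illustrative assumptions, not taken from XATest.

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.sql.Statement;
    import javax.sql.XAConnection;
    import javax.transaction.xa.XAResource;
    import javax.transaction.xa.Xid;

    // Hedged sketch of the DERBY-4743 scenario.
    public class XATempTableSketch {

        static class SimpleXid implements Xid {
            public int getFormatId() { return 0x1234; }
            public byte[] getGlobalTransactionId() { return new byte[] { 1 }; }
            public byte[] getBranchQualifier() { return new byte[] { 1 }; }
        }

        public static void main(String[] args) throws Exception {
            org.apache.derby.jdbc.EmbeddedXADataSource ds =
                    new org.apache.derby.jdbc.EmbeddedXADataSource();
            ds.setDatabaseName("xaTempDb");
            ds.setCreateDatabase("create");

            XAConnection xac = ds.getXAConnection();
            XAResource xar = xac.getXAResource();
            Connection c = xac.getConnection();

            Xid xid = new SimpleXid();
            xar.start(xid, XAResource.TMNOFLAGS);
            Statement s = c.createStatement();
            s.execute("DECLARE GLOBAL TEMPORARY TABLE SESSION.T4743 (I INT) "
                    + "ON COMMIT DELETE ROWS NOT LOGGED");
            s.execute("INSERT INTO SESSION.T4743 VALUES (1)");
            xar.end(xid, XAResource.TMSUCCESS);
            xar.commit(xid, true);                 // one-phase commit

            Statement s2 = c.createStatement();
            try {
                s2.execute("SELECT * FROM SESSION.T4743");
                System.out.println("unexpected: temp table survived the XA commit");
            } catch (SQLException e) {
                // Expected: the generic "table does not exist" error.
                System.out.println("as expected: " + e.getSQLState());
            }
            c.close();
            xac.close();
        }
    }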
derby-DERBY-4743-af45571f
DERBY-4743 Checking in changes to XATest.java to repro this bug. The test case is currently disabled. To enable it, remove the x from: public void xtestXATempTableD4743() git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@963931 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4748-7b374f98
DERBY-4748: StringIndexOutOfBoundsException on syntax error (invalid COMMIT) Rewrote logic in isolateAnyInitialIdentifier and added test case. Patch contributed by Knut Anders Hatlen (knut dot hatlen at oracle dot com) and Kristian Waagan (kristwaa at apache dot org). Patch file: derby-4748-1b-sioobe.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@980684 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-475-14d5f009
DERBY-475 Add a table-driven mechanism to allow simple one-argument functions to be added easily. These functions exist only at runtime in the SYSFUN schema and are resolved either with an explicit use of SYSFUN or with an unqualified function name. Start off with a set of standard functions from the java.lang.Math class. git-svn-id: https://svn.apache.org/repos/asf/incubator/derby/code/trunk@226528 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/types/RoutineAliasInfo.java", "hunks": [ { "added": [], "header": "@@ -122,7 +122,6 @@ public class RoutineAliasInfo extends MethodAliasInfo", "removed": [ "\t\tsetAliasType();" ] }, { "added": [], "header": "@@ -232,7 +231,6 @@ public class RoutineAliasInfo extends MethodAliasInfo", "removed": [ "\t\tsetAliasType();" ] }, { "added": [ "\t\t\tif (returnType == null) {" ], "header": "@@ -286,7 +284,7 @@ public class RoutineAliasInfo extends MethodAliasInfo", "removed": [ "\t\t\tif (aliasType == AliasInfo.ALIAS_TYPE_PROCEDURE_AS_CHAR) {" ] }, { "added": [ "\t\tif (returnType != null) {", "\t\tif ((returnType == null) &&" ], "header": "@@ -300,14 +298,14 @@ public class RoutineAliasInfo extends MethodAliasInfo", "removed": [ "\t\tif (aliasType == AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR) {", "\t\tif ((aliasType == AliasInfo.ALIAS_TYPE_PROCEDURE_AS_CHAR) &&" ] }, { "added": [ "\t\tif (returnType != null) {" ], "header": "@@ -315,7 +313,7 @@ public class RoutineAliasInfo extends MethodAliasInfo", "removed": [ "\t\tif (aliasType == AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR) {" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ "\t", "\t/**", "\t* SYSFUN functions. Table of functions that automatically appear", "\t* in the SYSFUN schema. This simple table assumes a single parameter", "\t* and RETURNS NULL ON NULL INPUT. The scheme could be expanded", "\t* to handle other function options such as other parameters if needed.", "\t*[0] = FUNCTION name", "\t*[1] = RETURNS type", "\t*[2] = Java class", "\t*[3] = method name and signature", "\t*[4] = parameter type (single parameter)", "\t*", "\t*/", "\tprivate static final String[][] SYSFUN_FUNCTIONS = {", "\t\t\t{\"ACOS\", \"DOUBLE\", \"java.lang.Math\", \"acos(double)\", \"DOUBLE\"},", "\t\t\t{\"ASIN\", \"DOUBLE\", \"java.lang.Math\", \"asin(double)\", \"DOUBLE\"},", "\t\t\t{\"ATAN\", \"DOUBLE\", \"java.lang.Math\", \"atan(double)\", \"DOUBLE\"},", "\t\t\t{\"COS\", \"DOUBLE\", \"java.lang.Math\", \"cos(double)\", \"DOUBLE\"},", "\t\t\t{\"SIN\", \"DOUBLE\", \"java.lang.Math\", \"sin(double)\", \"DOUBLE\"},", "\t\t\t{\"TAN\", \"DOUBLE\", \"java.lang.Math\", \"tan(double)\", \"DOUBLE\"},", "\t\t\t{\"DEGREES\", \"DOUBLE\", \"java.lang.Math\", \"toDegrees(double)\", \"DOUBLE\"},", "\t\t\t{\"RADIANS\", \"DOUBLE\", \"java.lang.Math\", \"toRadians(double)\", \"DOUBLE\"},", "\t\t\t{\"LN\", \"DOUBLE\", \"java.lang.Math\", \"log(double)\", \"DOUBLE\"},", "\t\t\t{\"EXP\", \"DOUBLE\", \"java.lang.Math\", \"exp(double)\", \"DOUBLE\"},", "\t\t\t{\"CEIL\", \"DOUBLE\", \"java.lang.Math\", \"ceil(double)\", \"DOUBLE\"},", "\t\t\t{\"CEILING\", \"DOUBLE\", \"java.lang.Math\", \"ceil(double)\", \"DOUBLE\"},", "\t\t\t{\"FLOOR\", \"DOUBLE\", \"java.lang.Math\", \"floor(double)\", \"DOUBLE\"},\t\t\t", "\t};", "\t", "\t/**", "\t * Runtime definition of the functions from SYSFUN_FUNCTIONS.", "\t * Populated dynamically as functions are called.", "\t */", "\tprivate static final AliasDescriptor[] SYSFUN_AD =", "\t\tnew AliasDescriptor[SYSFUN_FUNCTIONS.length];", "\t", "\t/**", "\t * Dummy parameter name for functions from SYSFUN_FUNCTIONS.", "\t */", "\tprivate static final String[] SYSFUN_PNAME = {\"P1\"};", "\t", "\t/**", "\t * Parameter mode (IN as required) for functions from SYSFUN_FUNCTIONS.", "\t */\t", "\tprivate static final int[] SYSFUN_PMODE = {JDBC30Translation.PARAMETER_MODE_IN};" ], "header": "@@ -183,6 +183,51 @@ public final class\tDataDictionaryImpl", 
"removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/StaticMethodCallNode.java", "hunks": [ { "added": [ "\t\t\t\t\t\t\t\t\t", "\t\t\t\tboolean noSchema = schemaName == null;", "\t\t\t\tresolveRoutine(fromList, subqueryList, aggregateVector, sd);", "\t\t\t\t", "\t\t\t\tif (ad == null && noSchema && !forCallStatement)", "\t\t\t\t{", "\t\t\t\t\t// Resolve to a built-in SYSFUN function but only", "\t\t\t\t\t// if this is a function call and the call", "\t\t\t\t\t// was not qualified. E.g. COS(angle). The", "\t\t\t\t\t// SYSFUN functions are not in SYSALIASES but", "\t\t\t\t\t// an in-memory table, set up in DataDictioanryImpl.", "\t\t\t\t\tsd = getSchemaDescriptor(\"SYSFUN\", true);", "\t\t\t\t\t", "\t\t\t\t\tresolveRoutine(fromList, subqueryList, aggregateVector, sd);" ], "header": "@@ -183,246 +183,25 @@ public class StaticMethodCallNode extends MethodCallNode", "removed": [ "\t\t\t\tif (sd.getUUID() != null) {", "", "\t\t\t\tjava.util.List list = getDataDictionary().getRoutineList(", "\t\t\t\t\tsd.getUUID().toString(), methodName,", "\t\t\t\t\tforCallStatement ? AliasInfo.ALIAS_NAME_SPACE_PROCEDURE_AS_CHAR : AliasInfo.ALIAS_NAME_SPACE_FUNCTION_AS_CHAR", "\t\t\t\t\t);", "", "\t\t\t\tfor (int i = list.size() - 1; i >= 0; i--) {", "", "\t\t\t\t\tAliasDescriptor proc = (AliasDescriptor) list.get(i);", "", "\t\t\t\t\tRoutineAliasInfo routineInfo = (RoutineAliasInfo) proc.getAliasInfo();", "\t\t\t\t\tint parameterCount = routineInfo.getParameterCount();", "\t\t\t\t\tif (parameterCount != methodParms.length)", "\t\t\t\t\t\tcontinue;", "", "\t\t\t\t\t// pre-form the method signature. If it is a dynamic result set procedure", "\t\t\t\t\t// then we need to add in the ResultSet array", "", "\t\t\t\t\tTypeDescriptor[] parameterTypes = routineInfo.getParameterTypes();", "", "\t\t\t\t\tint sigParameterCount = parameterCount;", "\t\t\t\t\tif (routineInfo.getMaxDynamicResultSets() > 0)", "\t\t\t\t\t\tsigParameterCount++;", "", "\t\t\t\t\tsignature = new JSQLType[sigParameterCount];", "\t\t\t\t\tfor (int p = 0; p < parameterCount; p++) {", "", "\t\t\t\t\t\t// find the declared type.", "", "\t\t\t\t\t\tTypeDescriptor td = parameterTypes[p];", "", "\t\t\t\t\t\tTypeId typeId = TypeId.getBuiltInTypeId(td.getJDBCTypeId());", "", "\t\t\t\t\t\tTypeId parameterTypeId = typeId;", "", "", "\t\t\t\t\t\t// if it's an OUT or INOUT parameter we need an array.", "\t\t\t\t\t\tint parameterMode = routineInfo.getParameterModes()[p];", "", "\t\t\t\t\t\tif (parameterMode != JDBC30Translation.PARAMETER_MODE_IN) {", "", "\t\t\t\t\t\t\tString arrayType;", "\t\t\t\t\t\t\tswitch (typeId.getJDBCTypeId()) {", "\t\t\t\t\t\t\t\tcase java.sql.Types.SMALLINT:", "\t\t\t\t\t\t\t\tcase java.sql.Types.INTEGER:", "\t\t\t\t\t\t\t\tcase java.sql.Types.BIGINT:", "\t\t\t\t\t\t\t\tcase java.sql.Types.REAL:", "\t\t\t\t\t\t\t\tcase java.sql.Types.DOUBLE:", "\t\t\t\t\t\t\t\t\tarrayType = getTypeCompiler(typeId).getCorrespondingPrimitiveTypeName().concat(\"[]\");", "\t\t\t\t\t\t\t\t\tbreak;", "\t\t\t\t\t\t\t\tdefault:", "\t\t\t\t\t\t\t\t\tarrayType = typeId.getCorrespondingJavaTypeName().concat(\"[]\");", "\t\t\t\t\t\t\t\t\tbreak;", "\t\t\t\t\t\t\t}", "", "\t\t\t\t\t\t\ttypeId = TypeId.getUserDefinedTypeId(arrayType, false);", "\t\t\t\t\t\t}", "", "\t\t\t\t\t\t// this is the type descriptor of the require method parameter", "\t\t\t\t\t\tDataTypeDescriptor methoddtd = new DataTypeDescriptor(", "\t\t\t\t\t\t\t\ttypeId,", "\t\t\t\t\t\t\t\ttd.getPrecision(),", "\t\t\t\t\t\t\t\ttd.getScale(),", "\t\t\t\t\t\t\t\ttd.isNullable(),", 
"\t\t\t\t\t\t\t\ttd.getMaximumWidth()", "\t\t\t\t\t\t\t);", "", "\t\t\t\t\t\tsignature[p] = new JSQLType(methoddtd);", "", "\t\t\t\t\t\t// check parameter is a ? node for INOUT and OUT parameters.", "", "\t\t\t\t\t\tValueNode sqlParamNode = null;", "", "\t\t\t\t\t\tif (methodParms[p] instanceof SQLToJavaValueNode) {", "\t\t\t\t\t\t\tSQLToJavaValueNode sql2j = (SQLToJavaValueNode) methodParms[p];", "\t\t\t\t\t\t\tsqlParamNode = sql2j.getSQLValueNode();", "\t\t\t\t\t\t}", "\t\t\t\t\t\telse", "\t\t\t\t\t\t{", "\t\t\t\t\t\t}", "", "\t\t\t\t\t\tboolean isParameterMarker = true;", "\t\t\t\t\t\tif ((sqlParamNode == null) || !sqlParamNode.isParameterNode())", "\t\t\t\t\t\t{", "\t\t\t\t\t\t\tif (parameterMode != JDBC30Translation.PARAMETER_MODE_IN) {", "\t\t\t\t\t\t\t ", "\t\t\t\t\t\t\t\tthrow StandardException.newException(SQLState.LANG_DB2_PARAMETER_NEEDS_MARKER,", "\t\t\t\t\t\t\t\t\tRoutineAliasInfo.parameterMode(parameterMode),", "\t\t\t\t\t\t\t\t\troutineInfo.getParameterNames()[p]);", "\t\t\t\t\t\t\t}", "\t\t\t\t\t\t\tisParameterMarker = false;", "\t\t\t\t\t\t}", "\t\t\t\t\t\telse", "\t\t\t\t\t\t{", "\t\t\t\t\t\t\tif (applicationParameterNumbers == null)", "\t\t\t\t\t\t\t\tapplicationParameterNumbers = new int[parameterCount];", "\t\t\t\t\t\t\tapplicationParameterNumbers[p] = ((ParameterNode) sqlParamNode).getParameterNumber();", "\t\t\t\t\t\t}", "", "\t\t\t\t\t\t// this is the SQL type of the procedure parameter.", "\t\t\t\t\t\tDataTypeDescriptor paramdtd = new DataTypeDescriptor(", "\t\t\t\t\t\t\tparameterTypeId,", "\t\t\t\t\t\t\ttd.getPrecision(),", "\t\t\t\t\t\t\ttd.getScale(),", "\t\t\t\t\t\t\ttd.isNullable(),", "\t\t\t\t\t\t\ttd.getMaximumWidth()", "\t\t\t\t\t\t);", "", "\t\t\t\t\t\tboolean needCast = false;", "\t\t\t\t\t\tif (!isParameterMarker)", "\t\t\t\t\t\t{", "", "\t\t\t\t\t\t\t// can only be an IN parameter.", "\t\t\t\t\t\t\t// check that the value can be assigned to the", "\t\t\t\t\t\t\t// type of the procedure parameter.", "\t\t\t\t\t\t\tif (sqlParamNode instanceof UntypedNullConstantNode)", "\t\t\t\t\t\t\t{", "\t\t\t\t\t\t\t\tsqlParamNode.setDescriptor(paramdtd);", "\t\t\t\t\t\t\t}", "\t\t\t\t\t\t\telse", "\t\t\t\t\t\t\t{", "", "", "\t\t\t\t\t\t\t\tDataTypeDescriptor dts;", "\t\t\t\t\t\t\t\tTypeId argumentTypeId;", "", "\t\t\t\t\t\t\t\tif (sqlParamNode != null)", "\t\t\t\t\t\t\t\t{", "\t\t\t\t\t\t\t\t\t// a node from the SQL world", "\t\t\t\t\t\t\t\t\targumentTypeId = sqlParamNode.getTypeId();", "\t\t\t\t\t\t\t\t\tdts = sqlParamNode.getTypeServices();", "\t\t\t\t\t\t\t\t}", "\t\t\t\t\t\t\t\telse", "\t\t\t\t\t\t\t\t{", "\t\t\t\t\t\t\t\t\t// a node from the Java world", "\t\t\t\t\t\t\t\t\tdts = DataTypeDescriptor.getSQLDataTypeDescriptor(methodParms[p].getJavaTypeName());", "\t\t\t\t\t\t\t\t\tif (dts == null)", "\t\t\t\t\t\t\t\t\t{", "\t\t\t\t\t\t\t\t\t\tthrow StandardException.newException(SQLState.LANG_NO_CORRESPONDING_S_Q_L_TYPE, ", "\t\t\t\t\t\t\t\t\t\t\tmethodParms[p].getJavaTypeName());", "\t\t\t\t\t\t\t\t\t}", "", "\t\t\t\t\t\t\t\t\targumentTypeId = dts.getTypeId();", "\t\t\t\t\t\t\t\t}", "", "\t\t\t\t\t\t\t\tif (! 
getTypeCompiler(parameterTypeId).storable(argumentTypeId, getClassFactory()))", "\t\t\t\t\t\t\t\t\t\tthrow StandardException.newException(SQLState.LANG_NOT_STORABLE, ", "\t\t\t\t\t\t\t\t\t\t\tparameterTypeId.getSQLTypeName(),", "\t\t\t\t\t\t\t\t\t\t\targumentTypeId.getSQLTypeName() );", "", "\t\t\t\t\t\t\t\t// if it's not an exact length match then some cast will be needed.", "\t\t\t\t\t\t\t\tif (!paramdtd.isExactTypeAndLengthMatch(dts))", "\t\t\t\t\t\t\t\t\tneedCast = true;", "\t\t\t\t\t\t\t}", "\t\t\t\t\t\t}", "\t\t\t\t\t\telse", "\t\t\t\t\t\t{", "\t\t\t\t\t\t\t// any variable length type will need a cast from the", "\t\t\t\t\t\t\t// Java world (the ? parameter) to the SQL type. This", "\t\t\t\t\t\t\t// ensures values like CHAR(10) are passed into the procedure", "\t\t\t\t\t\t\t// correctly as 10 characters long.", "\t\t\t\t\t\t\tif (parameterTypeId.variableLength()) {", "", "\t\t\t\t\t\t\t\tif (parameterMode != JDBC30Translation.PARAMETER_MODE_OUT)", "\t\t\t\t\t\t\t\t\tneedCast = true;", "\t\t\t\t\t\t\t}", "\t\t\t\t\t\t}", "\t\t\t\t\t\t", "", "\t\t\t\t\t\tif (needCast)", "\t\t\t\t\t\t{", "\t\t\t\t\t\t\t// push a cast node to ensure the", "\t\t\t\t\t\t\t// correct type is passed to the method", "\t\t\t\t\t\t\t// this gets tacky because before we knew", "\t\t\t\t\t\t\t// it was a procedure call we ensured all the", "\t\t\t\t\t\t\t// parameter are JavaNodeTypes. Now we need to", "\t\t\t\t\t\t\t// push them back to the SQL domain, cast them", "\t\t\t\t\t\t\t// and then push them back to the Java domain.", "", "\t\t\t\t\t\t\tif (sqlParamNode == null) {", "", "\t\t\t\t\t\t\t\tsqlParamNode = (ValueNode) getNodeFactory().getNode(", "\t\t\t\t\t\t\t\t\tC_NodeTypes.JAVA_TO_SQL_VALUE_NODE,", "\t\t\t\t\t\t\t\t\tmethodParms[p], ", "\t\t\t\t\t\t\t\t\tgetContextManager());", "\t\t\t\t\t\t\t}", "", "\t\t\t\t\t\t\tValueNode castNode = (ValueNode) getNodeFactory().getNode(", "\t\t\t\t\t\t\t\tC_NodeTypes.CAST_NODE,", "\t\t\t\t\t\t\t\tsqlParamNode, ", "\t\t\t\t\t\t\t\tparamdtd,", "\t\t\t\t\t\t\t\tgetContextManager());", "", "", "\t\t\t\t\t\t\tmethodParms[p] = (JavaValueNode) getNodeFactory().getNode(", "\t\t\t\t\t\t\t\t\tC_NodeTypes.SQL_TO_JAVA_VALUE_NODE,", "\t\t\t\t\t\t\t\t\tcastNode, ", "\t\t\t\t\t\t\t\t\tgetContextManager());", "", "\t\t\t\t\t\t\tmethodParms[p] = methodParms[p].bindExpression(fromList, subqueryList, aggregateVector);", "\t\t\t\t\t\t}", "", "\t\t\t\t\t\t// only force the type for a ? so that the correct type shows up", "\t\t\t\t\t\t// in parameter meta data", "\t\t\t\t\t\tif (isParameterMarker)", "\t\t\t\t\t\t\tsqlParamNode.setDescriptor(paramdtd);", "\t\t\t\t\t}", "", "\t\t\t\t\tif (sigParameterCount != parameterCount) {", "", "\t\t\t\t\t\tTypeId typeId = TypeId.getUserDefinedTypeId(\"java.sql.ResultSet[]\", false);", "", "\t\t\t\t\t\tDataTypeDescriptor dtd = new DataTypeDescriptor(", "\t\t\t\t\t\t\t\ttypeId,", "\t\t\t\t\t\t\t\t0,", "\t\t\t\t\t\t\t\t0,", "\t\t\t\t\t\t\t\tfalse,", "\t\t\t\t\t\t\t\t-1", "\t\t\t\t\t\t\t);", "", "\t\t\t\t\t\tsignature[parameterCount] = new JSQLType(dtd);", "", "\t\t\t\t\t}", "", "\t\t\t\t\tthis.routineInfo = routineInfo;", "\t\t\t\t\tad = proc;", "", "\t\t\t\t\t// If a procedure is in the system schema and defined as executing", "\t\t\t\t\t// SQL do we set we are in system code.", "\t\t\t\t\tif (sd.isSystemSchema() && (routineInfo.getReturnType() == null) && routineInfo.getSQLAllowed() != RoutineAliasInfo.NO_SQL)", "\t\t\t\t\t\tisSystemCode = true;", "", "\t\t\t\t\tbreak;", "\t\t\t}" ] } ] } ]
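A hedged sketch of the table-driven design shown in the SYSFUN_FUNCTIONS diff above, reduced to plain Java: a string table maps a function name to a one-argument java.lang.Math method, and entries are resolved reflectively the first time they are called. The class name and caching scheme are illustrative; Derby itself builds AliasDescriptors, not reflective Method objects.

    import java.lang.reflect.Method;
    import java.util.HashMap;
    import java.util.Map;

    // Hedged sketch of a table-driven one-argument function registry.
    public class OneArgFunctionTableSketch {

        private static final String[][] FUNCTIONS = {
            { "ACOS", "acos" }, { "ASIN", "asin" }, { "ATAN", "atan" },
            { "COS",  "cos"  }, { "SIN",  "sin"  }, { "TAN",  "tan"  },
            { "DEGREES", "toDegrees" }, { "RADIANS", "toRadians" },
            { "LN", "log" }, { "EXP", "exp" },
            { "CEIL", "ceil" }, { "CEILING", "ceil" }, { "FLOOR", "floor" },
        };

        private static final Map<String, Method> RESOLVED = new HashMap<String, Method>();

        public static double call(String name, double arg) throws Exception {
            Method m;
            synchronized (RESOLVED) {
                m = RESOLVED.get(name);
                if (m == null) {
                    for (String[] row : FUNCTIONS) {
                        if (row[0].equalsIgnoreCase(name)) {
                            m = Math.class.getMethod(row[1], double.class);
                            RESOLVED.put(name, m);   // resolve lazily, cache for next call
                            break;
                        }
                    }
                }
            }
            if (m == null) {
                throw new IllegalArgumentException("no such one-argument function: " + name);
            }
            return ((Double) m.invoke(null, Double.valueOf(arg))).doubleValue();
        }
    }

The design choice mirrored here is that adding a new function is a one-line table entry rather than new parser or catalog work, which is exactly what makes the mechanism suitable only for functions with fixed parameter and return types.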
derby-DERBY-475-b7e5031d
DERBY-475 DERBY-592 Expand the built-in function table to handle zero-parameter functions and add the functions PI and LOG10 as built-in and JDBC escaped functions. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@374471 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/SystemProcedures.java", "hunks": [ { "added": [ "\t", "\t<P>", "\tAlso used for builtin-routines, such as SYSFUN functions, when direct calls", "\tinto Java libraries cannot be made." ], "header": "@@ -54,6 +54,10 @@ import org.apache.derby.iapi.sql.conn.ConnectionUtil;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ "\t* in the SYSFUN schema. These functions are resolved to directly", "\t* if no schema name is given, e.g.", "\t* ", "\t* <code>", "\t* SELECT COS(angle) FROM ROOM_WALLS", "\t* </code>", "\t* ", "\t* Adding a function here is suitable when the function defintion", "\t* can have a single return type and fixed parameter types.", "\t* ", "\t* Functions that need to have a return type based upon the", "\t* input type(s) are not supported here. Typically those are", "\t* added into the parser and methods added into the DataValueDescriptor interface.", "\t* Examples are character based functions whose return type", "\t* length is based upon the passed in type, e.g. passed a CHAR(10)", "\t* returns a CHAR(10).", "\t* ", "\t* ", "\t* This simple table assumes zero or a single parameter", "\t*[4] = parameter type (single parameter) or null for no parameters." ], "header": "@@ -190,14 +190,32 @@ public final class\tDataDictionaryImpl", "removed": [ "\t* in the SYSFUN schema. This simple table assumes a single parameter", "\t*[4] = parameter type (single parameter)" ] }, { "added": [ "\t\t\t{\"PI\", \"DOUBLE\", \"org.apache.derby.catalog.SystemProcedures\", \"PI()\", null},", "\t\t\t{\"LOG10\", \"DOUBLE\", \"org.apache.derby.catalog.SystemProcedures\", \"LOG10(double)\", \"DOUBLE\"}," ], "header": "@@ -207,10 +225,12 @@ public final class\tDataDictionaryImpl", "removed": [] } ] } ]
derby-DERBY-4752-49174a90
DERBY-4752: CheapDateFormatter returns incorrect and invalid date strings Use java.util.Calendar to get the calculations right. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@967000 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/util/CheapDateFormatter.java", "hunks": [ { "added": [ "import java.util.Calendar;", "import java.util.Locale;", "import java.util.TimeZone;", "", " private static final TimeZone GMT = TimeZone.getTimeZone(\"GMT\");", "\t * The output is a String in the form yyyy-mm-dd hh:mm:ss.ddd GMT." ], "header": "@@ -21,29 +21,22 @@", "removed": [ "\tstatic final long SECONDS = 1000L;", "\tstatic final long MINUTES = SECONDS * 60L;", "\tstatic final long HOURS = MINUTES * 60L;", "\tstatic final long DAYS = HOURS * 24L;", "\tstatic final long NORMAL_YEAR = DAYS * 365L;", "\tstatic final long LEAP_YEAR = NORMAL_YEAR + DAYS;", "\tstatic final long FOURYEARS = (NORMAL_YEAR * 3L) + LEAP_YEAR;", "\tstatic final long END_OF_FIRST_YEAR = NORMAL_YEAR;", "\tstatic final long END_OF_SECOND_YEAR = END_OF_FIRST_YEAR + LEAP_YEAR;", "\tstatic final long END_OF_THIRD_YEAR = END_OF_SECOND_YEAR + NORMAL_YEAR;", "\tstatic final int[] DAYS_IN_MONTH = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};", "\tstatic final int FEBRUARY = 1;", "\t * The output is a String in the form yyyy/mm/dd hh:mm:ss.ddd GMT." ] }, { "added": [ "\t * @return The date formatted as yyyy-mm-dd hh:mm:ss.ddd GMT.", " // Get a GMT calendar with a well-known locale to help us calculate", " // the components of the date.", " Calendar cal = Calendar.getInstance(GMT, Locale.US);", " cal.setTimeInMillis(time);", "", " int year = cal.get(Calendar.YEAR);", " int month = cal.get(Calendar.MONTH) + 1; // convert 0-based to 1-based", " int days = cal.get(Calendar.DAY_OF_MONTH);", " int hours = cal.get(Calendar.HOUR_OF_DAY);", " int minutes = cal.get(Calendar.MINUTE);", " int seconds = cal.get(Calendar.SECOND);", " int millis = cal.get(Calendar.MILLISECOND);" ], "header": "@@ -57,97 +50,21 @@ public class CheapDateFormatter {", "removed": [ "\t * @return The date formatted as yyyy/mm/dd hh:mm:ss.ddd GMT.", "\t\t// Assume not a leap year until we know otherwise", "\t\tboolean leapYear = false;", "", "\t\t// How many four year periods since Jan. 
1, 1970?", "\t\tlong year = ((time / FOURYEARS) * 4L);", "", "\t\t// How much time is left over after the four-year periods?", "\t\tlong leftover = time % FOURYEARS;", "\t\ttime -= (year / 4L) * FOURYEARS;", "", "\t\tyear += 1970L;", "", "\t\t// Does time extend past end of first year in four-year period?", "\t\tif (leftover >= END_OF_FIRST_YEAR) {", "\t\t\tyear++;", "\t\t\ttime -= NORMAL_YEAR;", "\t\t}", "", "\t\t// Does time extend past end of second year in four-year period?", "\t\tif (leftover >= END_OF_SECOND_YEAR) {", "\t\t\tyear++;", "\t\t\ttime -= NORMAL_YEAR;", "\t\t}", "", "\t\t// Does time extend past end of third year in four-year period?", "\t\tif (leftover >= END_OF_THIRD_YEAR) {", "\t\t\tyear++;", "\t\t\ttime -= LEAP_YEAR;", "\t\t}", "", "\t\t// It's a leap year if divisible by 4, unless divisible by 100,", "\t\t// unless divisible by 400.", "\t\tif ((year % 4L) == 0) {", "\t\t\tif ((year % 100L) == 0) {", "\t\t\t\tif ((year % 400L) == 0) {", "\t\t\t\t\tleapYear = true;", "\t\t\t\t}", "\t\t\t}", "\t\t\tleapYear = true;", "\t\t}", "", "\t\t// What day of the year is this, starting at 1?", "\t\tlong days = (time / DAYS) + 1;", "", "\t\t// What month is this, starting at 1?", "\t\tint month = 1;", "\t\tfor (int i = 0; i < DAYS_IN_MONTH.length; i++) {", "\t\t\tint daysInMonth;", "", "\t\t\tif (leapYear && (i == FEBRUARY)) {", "\t\t\t\t// February has 29 days in a leap year", "\t\t\t\tdaysInMonth = 29;", "\t\t\t} else {", "\t\t\t\t// Get number of days in next month", "\t\t\t\tdaysInMonth = DAYS_IN_MONTH[i];", "\t\t\t}", "", "\t\t\t// Is date after the month we are looking at?", "\t\t\tif (days > daysInMonth) {", "\t\t\t\t// Count number of months", "\t\t\t\tmonth++;", "", "\t\t\t\t// Subtract number of days in month", "\t\t\t\tdays -= daysInMonth;", "\t\t\t} else {", "\t\t\t\t// Don't bother to look any more - the date is within", "\t\t\t\t// the current month.", "\t\t\t\tbreak;", "\t\t\t}", "\t\t}", "", "\t\t// How much time is left after days are accounted for?", "\t\ttime %= DAYS;", "", "\t\tlong hours = time / HOURS;", "", "\t\t// How much time is left after hours are accounted for?", "\t\ttime %= HOURS;", "", "\t\tlong minutes = time / MINUTES;", "", "\t\t// How much time is left after minutes are accounted for?", "\t\ttime %= MINUTES;", "", "\t\tlong seconds = time / SECONDS;", "", "\t\t// How much time is left after seconds are accounted for?", "\t\ttime %= SECONDS;" ] }, { "added": [ "\t\t\t\tthreeDigits(millis) + \" GMT\";", "\tprivate static String twoDigits(int val) {", "\t\t\tretval = Integer.toString(val);", "\tprivate static String threeDigits(int val) {" ], "header": "@@ -155,22 +72,22 @@ public class CheapDateFormatter {", "removed": [ "\t\t\t\tthreeDigits(time) + \" GMT\";", "\tprivate static String twoDigits(long val) {", "\t\t\tretval = Long.toString(val);", "\tprivate static String threeDigits(long val) {" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/unitTests/junit/_Suite.java", "hunks": [ { "added": [ " suite.addTest(CheapDateFormatterTest.suite());" ], "header": "@@ -47,6 +47,7 @@ public class _Suite extends BaseTestCase {", "removed": [] } ] } ]
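A sketch of the Calendar-based approach the commit above switches to: let java.util.Calendar do the leap-year and month arithmetic in GMT, and only do the zero-padding by hand. The class name is illustrative and the output format matches the yyyy-mm-dd hh:mm:ss.ddd GMT form mentioned in the diff.

    import java.util.Calendar;
    import java.util.Locale;
    import java.util.TimeZone;

    // Hedged sketch of GMT timestamp formatting via Calendar.
    public class GmtTimestampSketch {
        private static final TimeZone GMT = TimeZone.getTimeZone("GMT");

        public static String format(long millis) {
            Calendar cal = Calendar.getInstance(GMT, Locale.US);
            cal.setTimeInMillis(millis);
            return String.format(Locale.US, "%04d-%02d-%02d %02d:%02d:%02d.%03d GMT",
                    cal.get(Calendar.YEAR),
                    cal.get(Calendar.MONTH) + 1,       // Calendar months are 0-based
                    cal.get(Calendar.DAY_OF_MONTH),
                    cal.get(Calendar.HOUR_OF_DAY),
                    cal.get(Calendar.MINUTE),
                    cal.get(Calendar.SECOND),
                    cal.get(Calendar.MILLISECOND));
        }

        public static void main(String[] args) {
            System.out.println(format(System.currentTimeMillis()));
        }
    }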
derby-DERBY-4752-6500326f
DERBY-4752: CheapDateFormatter returns incorrect and invalid date strings Removed the CheapDateFormatter class and made the code use java.util.Date and its toString() method instead. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@988874 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/NetworkServerControlImpl.java", "hunks": [ { "added": [ "import java.util.Date;" ], "header": "@@ -53,6 +53,7 @@ import java.sql.DriverManager;", "removed": [] }, { "added": [], "header": "@@ -78,13 +79,11 @@ import org.apache.derby.iapi.services.property.PropertyUtil;", "removed": [ "import org.apache.derby.iapi.util.CheapDateFormatter;", "import org.apache.derby.iapi.security.SecurityUtil;" ] }, { "added": [ " lw.println(new Date() + \" : \" + msg);" ], "header": "@@ -604,7 +603,7 @@ public final class NetworkServerControlImpl {", "removed": [ " lw.println(getFormattedTimestamp() + \" : \" + msg);" ] }, { "added": [ " Monitor.logMessage(new Date() + \" : \" + msg);" ], "header": "@@ -616,7 +615,7 @@ public final class NetworkServerControlImpl {", "removed": [ " Monitor.logMessage(getFormattedTimestamp() + \" : \" + msg);" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/locks/Timeout.java", "hunks": [ { "added": [], "header": "@@ -21,8 +21,6 @@", "removed": [ "import org.apache.derby.impl.services.locks.TableNameInfo;", "" ] }, { "added": [ "import java.util.Date;" ], "header": "@@ -36,8 +34,7 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [ "import org.apache.derby.iapi.util.CheapDateFormatter;", "" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/monitor/BaseMonitor.java", "hunks": [ { "added": [], "header": "@@ -58,22 +58,14 @@ import org.apache.derby.iapi.services.loader.InstanceGetter;", "removed": [ "", "", "import org.apache.derby.iapi.services.jmx.ManagementService;", "", "import org.apache.derby.impl.services.monitor.StorageFactoryService;", "", "import org.apache.derby.iapi.util.CheapDateFormatter;", "import java.io.StringWriter;" ] }, { "added": [], "header": "@@ -82,10 +74,8 @@ import java.io.ByteArrayInputStream;", "removed": [ "import java.util.Hashtable;", "import java.util.Map;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/services/stream/BasicGetLogHeader.java", "hunks": [ { "added": [ "import java.util.Date;" ], "header": "@@ -21,8 +21,8 @@", "removed": [ "import org.apache.derby.iapi.util.CheapDateFormatter;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/store/raw/data/BaseDataFileFactory.java", "hunks": [ { "added": [], "header": "@@ -24,15 +24,6 @@ package org.apache.derby.impl.store.raw.data;", "removed": [ "import org.apache.derby.impl.store.raw.data.AllocationActions;", "import org.apache.derby.impl.store.raw.data.BaseContainerHandle;", "import org.apache.derby.impl.store.raw.data.BasePage;", "import org.apache.derby.impl.store.raw.data.DirectActions;", "import org.apache.derby.impl.store.raw.data.LoggableActions;", "import org.apache.derby.impl.store.raw.data.PageActions;", "import org.apache.derby.impl.store.raw.data.RecordId;", "import org.apache.derby.impl.store.raw.data.ReclaimSpace;", "" ] }, { "added": [ "import java.util.Date;" ], "header": "@@ -81,20 +72,18 @@ import org.apache.derby.iapi.reference.Property;", "removed": [ "import org.apache.derby.iapi.util.CheapDateFormatter;", "import java.io.UnsupportedEncodingException;", "import java.net.URLDecoder;" ] }, { "added": [ " logMsg(new Date() +" ], "header": "@@ -363,13 +352,12 @@ public class BaseDataFileFactory", "removed": [ "\t\tlong bootTime = System.currentTimeMillis();", "\t\tlogMsg(CheapDateFormatter.formatDate(bootTime) +" ] } ] }, { "file": "java/testing/org/apache/derbyTesting/unitTests/junit/_Suite.java", "hunks": [ { "added": [], "header": "@@ -47,7 +47,6 @@ public class 
_Suite extends BaseTestCase {", "removed": [ " suite.addTest(CheapDateFormatterTest.suite());" ] } ] } ]
derby-DERBY-4753-7b6ad6da
DERBY-4753: "ERROR 42X01: Syntax error: FALSE." during call to java.sql.DatabaseMetaData.getIndexInfo When recompiling a meta-data query after detecting that its plan is stale, pass down a flag to the compiler to say that it's a meta-data query and that use of internal syntax is allowed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1570490 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/PreparedStatement.java", "hunks": [ { "added": [ " * @param forMetaData true if this is a meta-data query" ], "header": "@@ -102,6 +102,7 @@ public interface PreparedStatement", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/GenericPreparedStatement.java", "hunks": [ { "added": [], "header": "@@ -47,7 +47,6 @@ import org.apache.derby.iapi.sql.ResultDescription;", "removed": [ "import org.apache.derby.iapi.sql.conn.SQLSessionContext;" ] }, { "added": [ " rePrepare(lcc, false);", " }", "", " public void rePrepare(LanguageConnectionContext lcc, boolean forMetaData)", " throws StandardException {", " PreparedStatement ps = statement.prepare(lcc, forMetaData);" ], "header": "@@ -251,8 +250,13 @@ public class GenericPreparedStatement", "removed": [ "\t\t\tPreparedStatement ps = statement.prepare(lcc);" ] }, { "added": [ " return executeStmt(a, rollbackParentContext, false, timeoutMillis);" ], "header": "@@ -315,7 +319,7 @@ public class GenericPreparedStatement", "removed": [ "\t\treturn executeStmt(a, rollbackParentContext, timeoutMillis);" ] }, { "added": [ " return executeStmt(activation, rollbackParentContext,", " false, timeoutMillis);" ], "header": "@@ -329,7 +333,8 @@ public class GenericPreparedStatement", "removed": [ "\t\treturn executeStmt(activation, rollbackParentContext, timeoutMillis);" ] }, { "added": [ " boolean forMetaData,", " return executeStmt(activation, false, forMetaData, timeoutMillis);" ], "header": "@@ -337,10 +342,11 @@ public class GenericPreparedStatement", "removed": [ "\t\treturn executeStmt(activation, false, timeoutMillis);" ] }, { "added": [ " * @param forMetaData true if this is a meta-data query" ], "header": "@@ -351,6 +357,7 @@ public class GenericPreparedStatement", "removed": [] }, { "added": [ " boolean forMetaData," ], "header": "@@ -358,6 +365,7 @@ public class GenericPreparedStatement", "removed": [] } ] } ]
derby-DERBY-4754-a2a0ff24
DERBY-4754: Make the getObject() methods of Derby's LOBs always return jdbc LOBs. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@982585 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/types/SQLClob.java", "hunks": [ { "added": [ " /**", " * @exception StandardException Thrown on error", " */", " public Object getObject() throws StandardException", " {", " if ( _clobValue != null ) { return _clobValue; }", " else", " {", " String stringValue = getString();", "", " if ( stringValue == null ) { return null; }", " else { return new HarmonySerialClob( stringValue.toCharArray() ); }", " }", " }", "" ], "header": "@@ -332,6 +332,21 @@ public class SQLClob", "removed": [] } ] } ]
derby-DERBY-4764-782dbe17
DERBY-4764: Files with missing ASF license headers Added or modified Apache license headers where missing or old. Patch file: derby-4764-1a-license_headers_trunk.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@984922 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/tools/org/apache/derby/tools/PlanExporter.java", "hunks": [ { "added": [ "/*", "", " Derby - Class org.apache.derby.tools.PlanExporter", "", " Licensed to the Apache Software Foundation (ASF) under one or more", " contributor license agreements. See the NOTICE file distributed with", " this work for additional information regarding copyright ownership.", " The ASF licenses this file to You under the Apache License, Version 2.0", " (the \"License\"); you may not use this file except in compliance with", " the License. You may obtain a copy of the License at", "", " http://www.apache.org/licenses/LICENSE-2.0", "", " Unless required by applicable law or agreed to in writing, software", " distributed under the License is distributed on an \"AS IS\" BASIS,", " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", " See the License for the specific language governing permissions and", " limitations under the License.", "", " */", "" ], "header": "@@ -1,3 +1,24 @@", "removed": [] } ] } ]
derby-DERBY-4767-4eeab6ca
DERBY-4767; Detailed prompt for Error XCL16 is different between Client and Embedded adding a check for the value of the operation, and fixing up some areas where an incorrect operation value was passed on. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@986145 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ResultSet.java", "hunks": [ { "added": [ " return getBoolean(findColumnX(columnName, \"getBoolean\"));" ], "header": "@@ -1440,7 +1440,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getBoolean(findColumnX(columnName));" ] }, { "added": [ " return getByte(findColumnX(columnName, \"getByte\"));" ], "header": "@@ -1454,7 +1454,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getByte(findColumnX(columnName));" ] }, { "added": [ " return getShort(findColumnX(columnName, \"getShort\"));" ], "header": "@@ -1468,7 +1468,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getShort(findColumnX(columnName));" ] }, { "added": [ " return getInt(findColumnX(columnName, \"getInt\"));" ], "header": "@@ -1482,7 +1482,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getInt(findColumnX(columnName));" ] }, { "added": [ " return getLong(findColumnX(columnName, \"getLong\"));" ], "header": "@@ -1496,7 +1496,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getLong(findColumnX(columnName));" ] }, { "added": [ " return getFloat(findColumnX(columnName, \"getFloat\"));" ], "header": "@@ -1510,7 +1510,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getFloat(findColumnX(columnName));" ] }, { "added": [ " return getDouble(findColumnX(columnName, \"getDouble\"));" ], "header": "@@ -1524,7 +1524,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getDouble(findColumnX(columnName));" ] }, { "added": [ " return getBigDecimal(findColumnX(columnName, \"getBigDecimal\"), scale);" ], "header": "@@ -1539,7 +1539,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getBigDecimal(findColumnX(columnName), scale);" ] }, { "added": [ " return getBigDecimal(findColumnX(columnName, \"getBigDecimal\"));" ], "header": "@@ -1553,7 +1553,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getBigDecimal(findColumnX(columnName));" ] }, { "added": [ " return getDate(findColumnX(columnName, \"getDate\"));" ], "header": "@@ -1567,7 +1567,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getDate(findColumnX(columnName));" ] }, { "added": [ " return getDate(findColumnX(columnName, \"getDate\"), cal);" ], "header": "@@ -1581,7 +1581,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getDate(findColumnX(columnName), cal);" ] }, { "added": [ " return getTime(findColumnX(columnName, \"getTime\"));" ], "header": "@@ -1595,7 +1595,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getTime(findColumnX(columnName));" ] }, { "added": [ " return getTime(findColumnX(columnName, \"getTime\"), cal);" ], "header": "@@ -1609,7 +1609,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getTime(findColumnX(columnName), cal);" ] }, { "added": [ " return getTimestamp(findColumnX(columnName, \"getTimestamp\"));" ], "header": "@@ -1623,7 +1623,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getTimestamp(findColumnX(columnName));" ] }, { "added": [ " return getTimestamp(findColumnX(columnName, \"getTimestamp\"), cal);" ], "header": "@@ -1637,7 
+1637,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getTimestamp(findColumnX(columnName), cal);" ] }, { "added": [ " return getString(findColumnX(columnName, \"getString\"));" ], "header": "@@ -1651,7 +1651,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getString(findColumnX(columnName));" ] }, { "added": [ " return getBytes(findColumnX(columnName, \"getBytes\"));" ], "header": "@@ -1665,7 +1665,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getBytes(findColumnX(columnName));" ] }, { "added": [ " return getBinaryStream(findColumnX(columnName, \"getBinaryStream\"));" ], "header": "@@ -1679,7 +1679,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getBinaryStream(findColumnX(columnName));" ] }, { "added": [ " return getAsciiStream(findColumnX(columnName, \"getAsciiStream\"));" ], "header": "@@ -1693,7 +1693,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getAsciiStream(findColumnX(columnName));" ] }, { "added": [ " return getUnicodeStream(findColumnX(columnName, \"getUnicodeStream\"));" ], "header": "@@ -1708,7 +1708,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getUnicodeStream(findColumnX(columnName));" ] }, { "added": [ " return getCharacterStream(findColumnX(columnName, \"getCharacterStream\"));" ], "header": "@@ -1722,7 +1722,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getCharacterStream(findColumnX(columnName));" ] }, { "added": [ " return getBlob(findColumnX(columnName, \"getBlob\"));" ], "header": "@@ -1736,7 +1736,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getBlob(findColumnX(columnName));" ] }, { "added": [ " return getClob(findColumnX(columnName, \"getClob\"));" ], "header": "@@ -1750,7 +1750,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getClob(findColumnX(columnName));" ] }, { "added": [ " return getArray(findColumnX(columnName, \"getArray\"));" ], "header": "@@ -1764,7 +1764,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getArray(findColumnX(columnName));" ] }, { "added": [ " return getRef(findColumnX(columnName, \"getRef\"));" ], "header": "@@ -1778,7 +1778,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getRef(findColumnX(columnName));" ] }, { "added": [ " return getObject(findColumnX(columnName, \"getObject\"));" ], "header": "@@ -1792,7 +1792,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getObject(findColumnX(columnName));" ] }, { "added": [ " return getObject(findColumnX(columnName, \"getObject\"), map);" ], "header": "@@ -1806,7 +1806,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " return getObject(findColumnX(columnName), map);" ] }, { "added": [ " int column = findColumnX(columnName, \"findColumn\");" ], "header": "@@ -1924,7 +1924,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " int column = findColumnX(columnName);" ] }, { "added": [ " private final int findColumnX(String columnName, String operation) throws SqlException {", " checkForClosedResultSet(operation);" ], "header": "@@ -1938,8 +1938,8 @@ public abstract class ResultSet implements 
java.sql.ResultSet,", "removed": [ " private final int findColumnX(String columnName) throws SqlException {", " checkForClosedResultSet(\"findColumn\");" ] }, { "added": [ " updateNull(findColumnX(columnName, \"updateNull\"));" ], "header": "@@ -3125,7 +3125,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateNull(findColumnX(columnName));" ] }, { "added": [ " updateBoolean(findColumnX(columnName, \"updateBoolean\"), x);" ], "header": "@@ -3139,7 +3139,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBoolean(findColumnX(columnName), x);" ] }, { "added": [ " updateByte(findColumnX(columnName, \"updateByte\"), x);" ], "header": "@@ -3153,7 +3153,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateByte(findColumnX(columnName), x);" ] }, { "added": [ " updateShort(findColumnX(columnName, \"updateShort\"), x);" ], "header": "@@ -3167,7 +3167,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateShort(findColumnX(columnName), x);" ] }, { "added": [ " updateInt(findColumnX(columnName, \"updateInt\"), x);" ], "header": "@@ -3181,7 +3181,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateInt(findColumnX(columnName), x);" ] }, { "added": [ " updateLong(findColumnX(columnName, \"updateLong\"), x);" ], "header": "@@ -3195,7 +3195,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateLong(findColumnX(columnName), x);" ] }, { "added": [ " updateFloat(findColumnX(columnName, \"updateFloat\"), x);" ], "header": "@@ -3209,7 +3209,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateFloat(findColumnX(columnName), x);" ] }, { "added": [ " updateDouble(findColumnX(columnName, \"updateDouble\"), x);" ], "header": "@@ -3223,7 +3223,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateDouble(findColumnX(columnName), x);" ] }, { "added": [ " updateBigDecimal(findColumnX(columnName, \"updateBigDecimal\"), x);" ], "header": "@@ -3237,7 +3237,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBigDecimal(findColumnX(columnName), x);" ] }, { "added": [ " updateDate(findColumnX(columnName, \"updateDate\"), x);" ], "header": "@@ -3251,7 +3251,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateDate(findColumnX(columnName), x);" ] }, { "added": [ " updateTime(findColumnX(columnName, \"updateTime\"), x);" ], "header": "@@ -3265,7 +3265,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateTime(findColumnX(columnName), x);" ] }, { "added": [ " updateTimestamp(findColumnX(columnName, \"updateTimestamp\"), x);" ], "header": "@@ -3279,7 +3279,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateTimestamp(findColumnX(columnName), x);" ] }, { "added": [ " updateString(findColumnX(columnName, \"updateString\"), x);" ], "header": "@@ -3293,7 +3293,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateString(findColumnX(columnName), x);" ] }, { "added": [ " updateBytes(findColumnX(columnName, \"updateBytes\"), x);" ], "header": "@@ -3307,7 +3307,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBytes(findColumnX(columnName), x);" ] }, { "added": [ " updateBinaryStream(findColumnX(columnName, 
\"updateBinaryStream\"), x, length);" ], "header": "@@ -3323,7 +3323,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBinaryStream(findColumnX(columnName), x, length);" ] }, { "added": [ " updateAsciiStream(findColumnX(columnName, \"updateAsciiStream\"), x, length);" ], "header": "@@ -3339,7 +3339,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateAsciiStream(findColumnX(columnName), x, length);" ] }, { "added": [ " updateCharacterStream(findColumnX(columnName, \"updateCharacterStream\"), x, length);" ], "header": "@@ -3355,7 +3355,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateCharacterStream(findColumnX(columnName), x, length);" ] }, { "added": [ " updateObject(findColumnX(columnName, \"updateObject\"), x, scale);" ], "header": "@@ -3369,7 +3369,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateObject(findColumnX(columnName), x, scale);" ] }, { "added": [ " updateObject(findColumnX(columnName, \"updateObject\"), x);" ], "header": "@@ -3383,7 +3383,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateObject(findColumnX(columnName), x);" ] }, { "added": [ " updateBlob(findColumnX(columnName, \"updateBlob\"), x);" ], "header": "@@ -3879,7 +3879,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBlob(findColumnX(columnName), x);" ] }, { "added": [ " updateBlob(findColumnX(columnName, \"updateBlob\"), x, length);" ], "header": "@@ -3943,7 +3943,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBlob(findColumnX(columnName), x, length);" ] }, { "added": [ " updateAsciiStream(findColumnX(columnName, \"updateAsciiStream\"), x);" ], "header": "@@ -5853,7 +5853,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateAsciiStream(findColumnX(columnName), x);" ] }, { "added": [ " updateAsciiStream(findColumnX(columnName, \"updateAsciiStream\"), x, length);" ], "header": "@@ -5879,7 +5879,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateAsciiStream(findColumnX(columnName), x, length);" ] }, { "added": [ " updateBinaryStream(findColumnX(columnLabel, \"updateBinaryStream\"), x);" ], "header": "@@ -5908,7 +5908,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBinaryStream(findColumnX(columnLabel), x);" ] }, { "added": [ " updateBinaryStream(findColumnX(columnName, \"updateBinaryStream\"), x, length);" ], "header": "@@ -5934,7 +5934,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBinaryStream(findColumnX(columnName), x, length);" ] }, { "added": [ " updateBlob(findColumnX(columnLabel, \"updateBlob\"), x);" ], "header": "@@ -5963,7 +5963,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateBlob(findColumnX(columnLabel), x);" ] }, { "added": [ " updateCharacterStream(findColumnX(columnLabel, \"updateCharacterStream\"), reader);" ], "header": "@@ -5991,7 +5991,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateCharacterStream(findColumnX(columnLabel), reader);" ] }, { "added": [ " updateCharacterStream(findColumnX(columnName, \"updateCharacterStream\"), reader, length);" ], "header": "@@ -6017,7 +6017,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " 
updateCharacterStream(findColumnX(columnName), reader, length);" ] }, { "added": [ " updateClob(findColumnX(columnLabel, \"updateClob\"), reader);" ], "header": "@@ -6049,7 +6049,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateClob(findColumnX(columnLabel), reader);" ] }, { "added": [ " updateClob(findColumnX(columnName, \"updateClob\"), x);" ], "header": "@@ -6128,7 +6128,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " updateClob(findColumnX(columnName), x);" ] } ] } ]
derby-DERBY-4767-c9687fc4
DERBY-4767; Detailed prompt for Error XCL16 is different between Client and Embedded patch contributed by Yun Lee (yun dot lee dot bj at gmail dot com) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@985550 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ResultSet.java", "hunks": [ { "added": [ " checkForClosedResultSet(\"next\");" ], "header": "@@ -293,7 +293,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"wasNull\");" ], "header": "@@ -543,7 +543,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkGetterPreconditions(column, \"getBoolean\");" ], "header": "@@ -572,7 +572,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getByte\");" ], "header": "@@ -606,7 +606,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getShort\");" ], "header": "@@ -640,7 +640,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getInt\");" ], "header": "@@ -674,7 +674,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getLong\");" ], "header": "@@ -708,7 +708,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getFloat\");" ], "header": "@@ -742,7 +742,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getDouble\");" ], "header": "@@ -776,7 +776,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getBigDecimal\");" ], "header": "@@ -811,7 +811,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getBigDecimal\");" ], "header": "@@ -843,7 +843,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getDate\");" ], "header": "@@ -873,7 +873,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getTime\");" ], "header": "@@ -916,7 +916,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getTimestamp\");" ], "header": "@@ -961,7 +961,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getString\");" ], "header": "@@ -1052,7 +1052,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getBytes\");" ], "header": "@@ -1080,7 +1080,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { 
"added": [ " checkGetterPreconditions(column, \"getBinaryStream\");" ], "header": "@@ -1109,7 +1109,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getAsciiStream\");" ], "header": "@@ -1140,7 +1140,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getCharacterStream\");" ], "header": "@@ -1191,7 +1191,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getBlob\");" ], "header": "@@ -1222,7 +1222,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getClob\");" ], "header": "@@ -1252,7 +1252,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getRef\");" ], "header": "@@ -1282,7 +1282,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getArray\");" ], "header": "@@ -1309,7 +1309,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getObject\");" ], "header": "@@ -1350,7 +1350,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkGetterPreconditions(column, \"getObject\");" ], "header": "@@ -1370,7 +1370,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkGetterPreconditions(column);" ] }, { "added": [ " checkForClosedResultSet(\"getWarnings\");" ], "header": "@@ -1829,7 +1829,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"clearWarnings\");" ], "header": "@@ -1853,7 +1853,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getCursorName\");" ], "header": "@@ -1873,7 +1873,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getMetaData\");" ], "header": "@@ -1912,7 +1912,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"findColumn\");" ], "header": "@@ -1939,7 +1939,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"isBeforeFirst\");" ], "header": "@@ -1951,7 +1951,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"isAfterLast\");" ], "header": "@@ -1982,7 +1982,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"isFirst\");" ], "header": "@@ -2015,7 +2015,7 @@ public 
abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"isLast\");" ], "header": "@@ -2044,7 +2044,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"beforeFirst\");" ], "header": "@@ -2075,7 +2075,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"afterLast\");" ], "header": "@@ -2116,7 +2116,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"first\");" ], "header": "@@ -2170,7 +2170,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"last\");" ], "header": "@@ -2225,7 +2225,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getRow\");" ], "header": "@@ -2290,7 +2290,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"absolute\");" ], "header": "@@ -2342,7 +2342,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"relative\");" ], "header": "@@ -2427,7 +2427,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"previous\");" ], "header": "@@ -2553,7 +2553,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"setFetchDirection\");" ], "header": "@@ -2603,7 +2603,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getFetchDirection\");" ], "header": "@@ -2628,7 +2628,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"setFetchSize\");" ], "header": "@@ -2647,7 +2647,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getFetchSize\");" ], "header": "@@ -2668,7 +2668,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getType\");" ], "header": "@@ -2683,7 +2683,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getConcurrency\");" ], "header": "@@ -2698,7 +2698,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"rowUpdated\");" ], "header": "@@ -2712,7 +2712,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"rowInserted\");" ], "header": "@@ -2731,7 +2731,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", 
"removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"rowDeleted\");" ], "header": "@@ -2753,7 +2753,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"insertRow\");", " checkForUpdatableResultSet(\"insertRow\");" ], "header": "@@ -3420,8 +3420,8 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();", "\tcheckForUpdatableResultSet(\"insertRow\");" ] }, { "added": [ " checkForClosedResultSet(\"updateRow\");" ], "header": "@@ -3491,7 +3491,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"deleteRow\");" ], "header": "@@ -3611,7 +3611,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"refreshRow\");" ], "header": "@@ -3666,7 +3666,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"cancelRowUpdates\");" ], "header": "@@ -3695,7 +3695,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"moveToInsertRow\");" ], "header": "@@ -3723,7 +3723,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"moveToCurrentRow\");" ], "header": "@@ -3746,7 +3746,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(\"getStatement\");" ], "header": "@@ -3796,7 +3796,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " private final void checkGetterPreconditions(int column, String operation)", " throws SqlException {", " checkForClosedResultSet(operation);" ], "header": "@@ -4715,8 +4715,9 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " private final void checkGetterPreconditions(int column) throws SqlException {", " checkForClosedResultSet();" ] }, { "added": [ " checkForClosedResultSet(operation);" ], "header": "@@ -4725,7 +4726,7 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " checkForClosedResultSet();" ] }, { "added": [ " protected final void checkForClosedResultSet(String operation)", " throws SqlException {", " throw new SqlException(agent_.logWriter_, new ClientMessageId(", " SQLState.LANG_RESULT_SET_NOT_OPEN), operation);" ], "header": "@@ -4753,11 +4754,12 @@ public abstract class ResultSet implements java.sql.ResultSet,", "removed": [ " protected final void checkForClosedResultSet() throws SqlException {", " throw new SqlException(agent_.logWriter_, ", " new ClientMessageId(SQLState.CLIENT_RESULT_SET_NOT_OPEN));" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetResultSet40.java", "hunks": [ { "added": [ " checkForClosedResultSet(\"isWrapperFor\");" ], "header": "@@ -160,7 +160,7 @@ public class NetResultSet40 extends NetResultSet{", "removed": [ " checkForClosedResultSet();" ] } ] } ]
derby-DERBY-4769-c9528432
DERBY-4769 Handle interrupt received while waiting for database lock (subtask of DERBY-4741): Patch derby-4967-locking-1 which lets Derby throw CONN_INTERRUPT if an interrupt is received while waiting for a database lock. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1058245 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/services/locks/LockSet.java", "hunks": [ { "added": [ " // If we were not woken by another then we have timed", " // out. Either deadlock out or timeout. Or thread has", " // been interrupted." ], "header": "@@ -346,8 +346,9 @@ forever:\tfor (;;) {", "removed": [ " // If we were not woken by another then we have", " // timed out. Either deadlock out or timeout" ] }, { "added": [ " ", " // ending wait because of lock timeout or interrupt", " if (wakeupReason == Constants.WAITING_LOCK_INTERRUPTED) {", " Thread.currentThread().interrupt();", " throw StandardException.newException(SQLState.CONN_INTERRUPT);", "", " } else if (deadlockTrace)" ], "header": "@@ -405,11 +406,16 @@ forever:\tfor (;;) {", "removed": [ " // ending wait because of lock timeout.", " if (deadlockTrace)" ] } ] } ]
derby-DERBY-4771-174db5a9
DERBY-4771: Continue investigation of automatic creation/update of index statistics. Removed unused method getRowEstimate. Spotted by Dag H. Wanvik (dag dot wanvik at oracle dot com). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1042461 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/QueryTreeNode.java", "hunks": [ { "added": [], "header": "@@ -579,23 +579,6 @@ public abstract class QueryTreeNode implements Visitable", "removed": [ "\t/**", "\t * Get the optimizer's estimate of the number of rows returned or affected", "\t * for an optimized QueryTree.", "\t *", "\t * For non-optimizable statements (for example, CREATE TABLE),", "\t * return 0. For optimizable statements, this method will be", "\t * over-ridden in the statement's root node (DMLStatementNode", "\t * in all cases we know about so far).", "\t *", "\t * @return\t0L", "\t */", "", "\tpublic long\tgetRowEstimate()", "\t{", "\t\treturn\t0L;", "\t}", "" ] } ] } ]
derby-DERBY-4772-4771f1f4
DERBY-4772: Data truncation error with XPLAIN-functionality enabled Increased max length of string fields with undefined max lengths to the maximum allowed length of the VARCHAR data type (and also changed the data type from CHAR to VARCHAR). Patch file: derby-4772-1b-increase_max_len.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1033864 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/catalog/XPLAINResultSetDescriptor.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.TypeId;" ], "header": "@@ -26,7 +26,7 @@ import java.sql.SQLException;", "removed": [ "import org.apache.derby.impl.sql.catalog.SystemColumnImpl;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/XPLAINScanPropsDescriptor.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.TypeId;" ], "header": "@@ -26,7 +26,7 @@ import java.sql.SQLException;", "removed": [ "import org.apache.derby.impl.sql.catalog.SystemColumnImpl;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/XPLAINSortPropsDescriptor.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.types.TypeId;", "" ], "header": "@@ -26,11 +26,11 @@ import java.sql.SQLException;", "removed": [ "import org.apache.derby.impl.sql.catalog.SystemColumnImpl;", " *" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/catalog/XPLAINStatementDescriptor.java", "hunks": [ { "added": [], "header": "@@ -27,7 +27,6 @@ import java.sql.Timestamp;", "removed": [ "import org.apache.derby.impl.sql.catalog.SystemColumnImpl;" ] }, { "added": [ " SystemColumnImpl.getColumn(\"JVM_ID\", Types.VARCHAR, false,", " TypeId.VARCHAR_MAXWIDTH),", " SystemColumnImpl.getColumn(\"OS_IDENTIFIER\", Types.VARCHAR, false,", " TypeId.VARCHAR_MAXWIDTH),", " SystemColumnImpl.getColumn(\"XPLAIN_THREAD_ID\", Types.VARCHAR, false,", " TypeId.VARCHAR_MAXWIDTH),", " SystemColumnImpl.getColumn(\"TRANSACTION_ID\", Types.VARCHAR, false,", " TypeId.VARCHAR_MAXWIDTH),", " SystemColumnImpl.getColumn(\"SESSION_ID\", Types.VARCHAR, false,", " TypeId.VARCHAR_MAXWIDTH),", " SystemColumnImpl.getColumn(\"DRDA_ID\", Types.VARCHAR, true,", " TypeId.VARCHAR_MAXWIDTH)," ], "header": "@@ -122,15 +121,21 @@ public class XPLAINStatementDescriptor extends XPLAINTableDescriptor", "removed": [ " SystemColumnImpl.getColumn(\"JVM_ID\", Types.CHAR, false, 30),", " SystemColumnImpl.getColumn(\"OS_IDENTIFIER\", Types.CHAR, false, 30),", " SystemColumnImpl.getColumn(\"XPLAIN_THREAD_ID\", Types.CHAR, false, 32),", " SystemColumnImpl.getColumn(\"TRANSACTION_ID\", Types.CHAR, false, 32),", " SystemColumnImpl.getColumn(\"SESSION_ID\", Types.CHAR, false, 32),", " SystemColumnImpl.getColumn(\"DRDA_ID\", Types.CHAR, true, 32)," ] } ] } ]
derby-DERBY-4772-f1c83832
DERBY-4772 (cleanup): Data truncation error with XPLAIN-functionality enabled Cleaned up a bunch of unused imports, some unused variables and removed an Exception.printStackTrace(). git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1027921 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/xplain/XPLAINSystemTableVisitor.java", "hunks": [ { "added": [], "header": "@@ -28,22 +28,15 @@ import java.sql.Timestamp;", "removed": [ "import java.util.Properties;", "import org.apache.derby.jdbc.InternalDriver;", "import org.apache.derby.impl.jdbc.Util;", "import org.apache.derby.iapi.services.io.FormatableProperties;", "import org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator;", "import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;", "import org.apache.derby.iapi.sql.dictionary.TupleDescriptor;" ] }, { "added": [], "header": "@@ -52,38 +45,6 @@ import org.apache.derby.impl.sql.catalog.XPLAINStatementDescriptor;", "removed": [ "import org.apache.derby.iapi.store.access.TransactionController;", "import org.apache.derby.impl.sql.compile.IntersectOrExceptNode;", "import org.apache.derby.impl.sql.execute.rts.RealAnyResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealDeleteCascadeResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealDeleteResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealDeleteVTIResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealDistinctScalarAggregateStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealDistinctScanStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealGroupedAggregateStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealHashJoinStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealHashLeftOuterJoinStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealHashScanStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealHashTableStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealIndexRowToBaseRowStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealInsertResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealInsertVTIResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealLastIndexKeyScanStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealMaterializedResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealNestedLoopJoinStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealNestedLoopLeftOuterJoinStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealNormalizeResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealOnceResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealProjectRestrictStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealRowResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealScalarAggregateStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealScrollInsensitiveResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealSetOpResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealSortStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealTableScanStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealUnionResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealUpdateResultSetStatistics;", "import org.apache.derby.impl.sql.execute.rts.RealVTIStatistics;" ] }, { "added": [], "header": "@@ -104,8 +65,6 @@ public class XPLAINSystemTableVisitor implements XPLAINVisitor {", "removed": [ " private TransactionController tc;", " private DataDescriptorGenerator ddg;" ] }, { "added": [], "header": "@@ -217,8 +176,6 @@ public class XPLAINSystemTableVisitor implements XPLAINVisitor {", "removed": [ 
" tc = lcc.getTransactionExecute();", " ddg = dd.getDataDescriptorGenerator(); " ] }, { "added": [], "header": "@@ -324,7 +281,6 @@ public class XPLAINSystemTableVisitor implements XPLAINVisitor {", "removed": [ " e.printStackTrace();" ] }, { "added": [], "header": "@@ -347,7 +303,6 @@ public class XPLAINSystemTableVisitor implements XPLAINVisitor {", "removed": [ " tc = null;" ] } ] } ]
derby-DERBY-4779-d9a720b2
DERBY-4779: Commit patch contributed by Siddharth Srivastava which eliminates an NPE caused by the interaction of generated columns and triggers. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1140222 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4781-11ab591f
DERBY-4781 suites.All fails completely with Failed to invoke suite():java.lang.reflect.InvocationTargetException because of XplainStatisticsTest Change getADocument() to return an Object instead of Document so weme does not fail to load the class. Contributed by Knut Anders Hatlen git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@988321 13f79535-47bb-0310-9956-ffa450edef68
[]
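The fix above changes the test helper's declared return type from org.w3c.dom.Document to Object so the test class can still be loaded on JVMs (such as the WEME/J2ME environments mentioned here) that lack the DOM classes; only code that actually uses the value casts it. A tiny sketch of the idea, with invented names:

```java
public class LazyDomExample {

    /**
     * Declaring Object (instead of org.w3c.dom.Document) as the return type keeps
     * the DOM type out of this class's signatures, so just loading the class, for
     * example while a JUnit suite() is being built, does not require org.w3c.dom
     * to be present. Invoking the method still needs the XML classes, but that
     * call can be skipped on platforms that lack them.
     */
    public static Object getADocument() throws Exception {
        return javax.xml.parsers.DocumentBuilderFactory.newInstance()
                .newDocumentBuilder()
                .newDocument();
    }

    public static void main(String[] args) throws Exception {
        Object doc = getADocument();
        // Code that really needs DOM would cast to org.w3c.dom.Document here.
        System.out.println(doc.getClass().getName());
    }
}
```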
derby-DERBY-4781-158ccc5d
DERBY-4781: suites.All fails completely with Failed to invoke suite():java.lang.reflect.InvocationTargetException because of XplainStatisticsTest Fixed replace() method. The old version would sometimes erroneously return an empty string. Patch contributed by Nirmal Fernando <nirmal070125@gmail.com>. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@989036 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/tools/org/apache/derby/impl/tools/planexporter/AccessDatabase.java", "hunks": [ { "added": [ " \t int idx = stmt.indexOf(expr);", " \t while (idx >= 0)", " \t {", " \t stmt = stmt.substring(0, idx) + replace + stmt.substring(idx+1);", " \t idx = stmt.indexOf(expr);", " \t }", " \t return stmt;" ], "header": "@@ -460,15 +460,13 @@ public class AccessDatabase {", "removed": [ " \tif(stmt.indexOf(expr)!=-1){", " \t\tstmt=stmt.substring(0, stmt.indexOf(expr))", " \t\t+replace+stmt.substring(stmt.indexOf(expr)+1);", " \t\treplace(stmt,expr,replace);", " \t\treturn \"\";", " \t}", " \telse{", " \t\treturn stmt;", " \t}" ] } ] } ]
derby-DERBY-4781-b1b255cc
DERBY-4781 (partial) Fix XplainStatisticsTest failure with J2ME on split method introduced by DERBY-4587 Contributed by C.S. Nirmal J Fernando ( nirmal070125 at gmail dot com ) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@988264 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/tools/org/apache/derby/impl/tools/planexporter/AccessDatabase.java", "hunks": [ { "added": [ " \tif(stmt.indexOf(expr)!=-1){", " \t\tstmt=stmt.substring(0, stmt.indexOf(expr))", " \t\t+replace+stmt.substring(stmt.indexOf(expr)+1);", " \t\treplace(stmt,expr,replace);", " \t\treturn \"\";", " \t}", " \telse{", " \t\treturn stmt;", " \t}", " " ], "header": "@@ -460,15 +460,17 @@ public class AccessDatabase {", "removed": [ " String[] part=stmt.split(expr);", " String newStmt= part[0];", " for(int i=1;i<part.length;i++){", " newStmt += \" \"+replace+\" \"+part[i];", " }", "", " return newStmt;", "" ] } ] } ]
derby-DERBY-4786-04243ad4
DERBY-4786 (Shutdown command without username and password should work with mixed client and network server releases.) This change will send shutdown command with protocol level 2 whether there is username or password provided or not. If this command fails because of DRDA_InvalidReplyHead from server, then it will resend the shutdown command but this time with protocol level 1. The attempt with protocol level 1 will be made only if there was no username and password supplied with the shutdown command because shutdown command at protocol level 1 does not support username/password. If a client with these changes sends a shutdown command to a 10.3 server (which does not have DERBY-2109 changes) without the username and password, it will get 2 exceptions back from the server. 1st exception will be for sending the shutdown command at protocol level 2 and the 2nd will be for shutting down the server successfully. Server will also have 2 messages, first indicating that an invalid protocol level command was received and 2nd message saying that the server is shutdown. I will create a release note for this behavior. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@999119 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/drda/org/apache/derby/impl/drda/NetworkServerControlImpl.java", "hunks": [ { "added": [ "\t//All the commands except shutdown with username and password are at ", "\t//protocol level 1. ", "\tprivate final static int DEFAULT_PROTOCOL_VERSION = 1;", "\t// DERBY-2109: shutdown command now transmits optional user credentials", "\t//For shutdown with username/password, we have added a new protocol level", "\tprivate final static int SHUTDOWN_WITH_CREDENTIAL_PROTOCOL_VERSION = 2;", "\t//The highest protocol level is 2. The reason for it to be at 2 is ", "\t//the shutdown command with username/password", "\tprivate final static int MAX_ALLOWED_PROTOCOL_VERSION = 2;", "" ], "header": "@@ -129,10 +129,16 @@ public final class NetworkServerControlImpl {", "removed": [ "\t// command protocol version - you need to increase this number each time", "\t// the command protocol changes ", " // DERBY-2109: shutdown command now transmits user credentials", "\tprivate final static int PROTOCOL_VERSION = 2;" ] }, { "added": [ " try {", " writeCommandHeader(COMMAND_SHUTDOWN, SHUTDOWN_WITH_CREDENTIAL_PROTOCOL_VERSION);", " // DERBY-2109: transmit user credentials for System Privileges check", " writeLDString(userArg);", " writeLDString(passwordArg);", " send();", " readResult();", " } catch (Exception e) {", " \t//The shutdown command with protocol level 2 failed. If ", " \t//the username or password were supplied then we can't ", " \t//try the shutdown with protocol level 1 because protocol", " \t//leve 1 does not support username/password. Because of", " \t//that, we should simply throw the caught exception to the", " \t//client", " \tif(userArg != null || passwordArg != null)", " \t\tthrow e;", " //If no username and password is specified then we can try", " \t//shutdown with the old protocol level of 1 which is the ", " \t//default protocol level. But this can be tried only if the", " \t//exception for attempt of shutdown with protocol level 2", " \t//was DRDA_InvalidReplyHead. This can happen if we are ", " \t//dealing with an older Network server product which do not", " \t//recognize shutdown at protocol level 2.", " \tif (e.getMessage().indexOf(\"DRDA_InvalidReplyHead\") != -1)", " \t{", " try {", " closeSocket();", " setUpSocket();", " writeCommandHeader(COMMAND_SHUTDOWN);", " send();", " readResult();", " } catch (Exception e1) {", " \te1.initCause(e);", " \tthrow e1;", " }", " \t}", " \telse", " \t\tthrow e;", " }" ], "header": "@@ -1028,12 +1034,45 @@ public final class NetworkServerControlImpl {", "removed": [ " writeCommandHeader(COMMAND_SHUTDOWN);", " // DERBY-2109: transmit user credentials for System Privileges check", " writeLDString(userArg);", " writeLDString(passwordArg);", " send();", " readResult();" ] }, { "added": [ "\t\t\tif (version <= 0 || version > MAX_ALLOWED_PROTOCOL_VERSION)" ], "header": "@@ -1612,7 +1651,7 @@ public final class NetworkServerControlImpl {", "removed": [ "\t\t\tif (version <= 0 || version > PROTOCOL_VERSION)" ] }, { "added": [ "\t\t\t\t\tif (version == SHUTDOWN_WITH_CREDENTIAL_PROTOCOL_VERSION) {", "\t\t\t\t\t\t//Protocol version of client is not at default protocol", "\t\t\t\t\t\t//of 1 because this version of shutdown command has", "\t\t\t\t\t\t//username and password supplied with it. 
When the", "\t\t\t\t\t\t//protocol version of client is ", "\t\t\t\t\t\t//SHUTDOWN_WITH_CREDENTIAL_PROTOCOL_VERSION, then we ", "\t\t\t\t\t\t//know to expect username and password", "\t\t\t\t\t\t// DERBY-2109: receive user credentials for shutdown", "\t\t\t\t\t\t// System Privileges check", "\t\t\t\t\t\tuserArg = reader.readCmdString();", "\t\t\t\t\t\tpasswordArg = reader.readCmdString();", "\t\t\t\t\t}" ], "header": "@@ -1645,10 +1684,18 @@ public final class NetworkServerControlImpl {", "removed": [ "\t\t\t\t\t// DERBY-2109: receive user credentials for shutdown", "\t\t\t\t\t// System Privileges check", "\t\t\t\t\tuserArg = reader.readCmdString();", "\t\t\t\t\tpasswordArg = reader.readCmdString();" ] }, { "added": [ "\t * Write command header consisting of command header string and default", "\t * protocol version and command. At this point, all the commands except", "\t * shutdown with username/passwrod use default protocol version.", "\t{", "\t\twriteCommandHeader(command, DEFAULT_PROTOCOL_VERSION);", "\t}", "\t", "\t/**", "\t * Write command header consisting of command header string and passed", "\t * protocol version and command. At this point, all the commands except", "\t * shutdown with username/passwrod use default protocol version.", "\t *", "\t * @param command\tcommand to be written", "\t * @param protocol_version_for_command protocol version to be used", "\t * for the given command", "\t *", "\t * @exception Exception\tthrows an exception if an error occurs", "\t */", "\tprivate void writeCommandHeader(int command, int protocol_version_for_command) throws Exception", "\t\t\tcommandOs.writeByte((byte)((protocol_version_for_command & 0xf0) >> 8 ));", "\t\t\tcommandOs.writeByte((byte)(protocol_version_for_command & 0x0f));" ], "header": "@@ -2612,19 +2659,36 @@ public final class NetworkServerControlImpl {", "removed": [ "\t * Write command header consisting of command header string and protocol", "\t * version and command", "\t\t\tcommandOs.writeByte((byte)((PROTOCOL_VERSION & 0xf0) >> 8 ));", "\t\t\tcommandOs.writeByte((byte)(PROTOCOL_VERSION & 0x0f));" ] } ] } ]
derby-DERBY-4789-66f38fa6
DERBY-4789 Add more tests for bulk insert on self join, union, left outer join and expected results. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@996700 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4789-d7ee7fac
DERBY-4789: Attempt bulk-insert optimization when inserting from a table function. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@993374 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-479-3905037d
DERBY-479 Fix linkage error when passing the value of a RETURNS NULL ON NULL INPUT function to another function. Fixed by only removing SQLToJava/JavaToSQL nodes for the function's return value when the function is a CALLED ON NULL INPUT function. Fix contributed by Mamta Satoor - msatoor@gmail.com git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@381553 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/MethodCallNode.java", "hunks": [ { "added": [ "\t\t\t** Since we need the parameter to be in Java domain format, put a", "\t\t\t** SQLToJavaValueNode on top of the parameter node if it is a ", "\t\t\t** SQLValueNode. But if the parameter is already in Java domain ", "\t\t\t** format, then we don't need to do anything.", "\t\t\t\tqt = (SQLToJavaValueNode) getNodeFactory().getNode(", "\t\t\t\t\t\tC_NodeTypes.SQL_TO_JAVA_VALUE_NODE, ", "\t\t\t\t\t\tqt, ", "\t\t\t\t\t\tgetContextManager());" ], "header": "@@ -200,32 +200,18 @@ abstract class MethodCallNode extends JavaValueNode", "removed": [ "", "", "\t\t\t** If the parameter is a SQL ValueNode, there are two", "\t\t\t** possibilities. Either it is a JavaValueNode with", "\t\t\t** a JavaToSQLValueNode on top of it, or it is a plain", "\t\t\t** SQL ValueNode. In the former case, just get rid of", "\t\t\t** the JavaToSQLValueNode. In the latter case, put a", "\t\t\t** SQLToJavaValueNode on top of it. In general, we", "\t\t\t** want to avoid converting the same value back and forth", "\t\t\t** between the SQL and Java domains.", "\t\t\t\tif (qt instanceof JavaToSQLValueNode)", "\t\t\t\t{", "\t\t\t\t\tqt = ((JavaToSQLValueNode) qt).getJavaValueNode();", "\t\t\t\t}", "\t\t\t\telse", "\t\t\t\t{", "\t\t\t\t\tqt = (SQLToJavaValueNode) getNodeFactory().", "\t\t\t\t\t\t\tgetNode(", "\t\t\t\t\t\t\t\tC_NodeTypes.SQL_TO_JAVA_VALUE_NODE,", "\t\t\t\t\t\t\t\tqt,", "\t\t\t\t\t\t\t\tgetContextManager());", "\t\t\t\t}" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/StaticMethodCallNode.java", "hunks": [ { "added": [ "\t\t\tif (methodParms != null) ", "\t\t\t\toptimizeDomainValueConversion();", "\t\t\t" ], "header": "@@ -240,6 +240,9 @@ public class StaticMethodCallNode extends MethodCallNode", "removed": [] } ] } ]
derby-DERBY-4791-19984411
DERBY-4791 (partial) LIKE operator optimizations and concatenation Made ConcatenationOperatorNode capable of constant folding so that for example 'ab' || '%' can be handled the same way as 'ab%'. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@993074 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/ConcatenationOperatorNode.java", "hunks": [ { "added": [ " /**", " * Check if this node always evaluates to the same value. If so, return", " * a constant node representing the known result.", " *", " * @return a constant node representing the result of this concatenation", " * operation, or {@code this} if the result is not known up front", " */", " ValueNode evaluateConstantExpressions() throws StandardException {", " if (leftOperand instanceof CharConstantNode &&", " rightOperand instanceof CharConstantNode) {", " CharConstantNode leftOp = (CharConstantNode) leftOperand;", " CharConstantNode rightOp = (CharConstantNode) rightOperand;", " StringDataValue leftValue = (StringDataValue) leftOp.getValue();", " StringDataValue rightValue = (StringDataValue) rightOp.getValue();", "", " StringDataValue resultValue =", " (StringDataValue) getTypeServices().getNull();", " resultValue.concatenate(leftValue, rightValue, resultValue);", "", " return (ValueNode) getNodeFactory().getNode(", " C_NodeTypes.CHAR_CONSTANT_NODE,", " resultValue.getString(),", " getContextManager());", " }", "", " return this;", " }", "" ], "header": "@@ -65,6 +65,34 @@ public class ConcatenationOperatorNode extends BinaryOperatorNode {", "removed": [] } ] } ]
derby-DERBY-4798-a823c6a8
DERBY-4712 Complex nested joins problems Patch DERBY-4712b, which removes one of the sources of the NPE seen by the reporter. The other is covered by DERBY-4798. A corner case: the patch makes an inner join which decides it is not flattenable propagate this fact down to any nested outer join nodes containing nested inner joins; the latter inner joins will otherwise think they are flattenable (a priori value for inner joins). Adds new test cases. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@997325 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4798-d9dd1e59
DERBY-4798 NPE in nested outer join Patch derby-4798a. Reintroduces the bailout code in BaseActivation#getColumnFromRow which was removed in DERBY-3097 until we understand why it is needed. Adds the repro for this issue to OuterJoinTest git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@998170 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-48-4d8deb63
DERBY-48 A connection request that has a default schema that is being created by another transaction will fail to connect Patch derby-48-7, which auto-creates the schema in a nested transaction if possible, thus allowing early release of write locks used to auto-create the schema. This has the side-effect of making auto-created schema persist even if the user transaction that triggered it rolls back. Added a new test. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@662446 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/catalog/DataDictionaryImpl.java", "hunks": [ { "added": [ "\t\t\t\t\t\tfalse,", "\t\t\t\t\t\ttc);" ], "header": "@@ -1522,7 +1522,8 @@ public final class\tDataDictionaryImpl", "removed": [ "\t\t\t\t\t\tfalse);" ] }, { "added": [ "\t\t\t\t\t\tfalse,", "\t\t\t\t\t\ttc);" ], "header": "@@ -1563,7 +1564,8 @@ public final class\tDataDictionaryImpl", "removed": [ "\t\t\t\t\t\tfalse);" ] }, { "added": [ "\t * @param list The list to build, if supplied. If null, then", "\t * caller expects a single descriptor" ], "header": "@@ -8042,8 +8044,8 @@ public final class\tDataDictionaryImpl", "removed": [ "\t * @param list\t\tThe list to build, if supplied. If null, then caller expects", "\t *\t\t\t\t\ta single descriptor" ] }, { "added": [ "\t{", "\t\t// Get the current transaction controller", "\t\tTransactionController tc = getTransactionCompile();", "", "\t\treturn getDescriptorViaIndexMinion(indexId,", "\t\t\t\t\t\t\t\t\t\t keyRow,", "\t\t\t\t\t\t\t\t\t\t scanQualifiers,", "\t\t\t\t\t\t\t\t\t\t ti,", "\t\t\t\t\t\t\t\t\t\t parentTupleDescriptor,", "\t\t\t\t\t\t\t\t\t\t list,", "\t\t\t\t\t\t\t\t\t\t forUpdate,", "\t\t\t\t\t\t\t\t\t\t tc);", "\t}", "", "\t/**", "\t * Return a (single or list of) catalog row descriptor(s) from a", "\t * system table where the access is from the index to the heap.", "\t *", "\t * This overload variant takes an explicit tc, in contrast to the normal", "\t * one which uses the one returned by getTransactionCompile.", "\t *", "\t * @param indexId\tThe id of the index (0 to # of indexes on table) to use", "\t * @param keyRow\tThe supplied ExecIndexRow for search", "\t * @param ti\t\tThe TabInfoImpl to use", "\t * @param parentTupleDescriptor\t\tThe parentDescriptor, if applicable.", "\t * @param list The list to build, if supplied. 
If null, then", "\t *\t\t\t\t\tcaller expects a single descriptor", "\t * @param forUpdate\tWhether or not to open the index for update.", "\t * @param tc Transaction controller", "\t *", "\t * @return\tThe last matching descriptor", "\t *", "\t * @exception StandardException\t\tThrown on error", "\t */", "\tprivate final TupleDescriptor getDescriptorViaIndex(", "\t\t\t\t\t\tint indexId,", "\t\t\t\t\t\tExecIndexRow keyRow,", "\t\t\t\t\t\tScanQualifier [][] scanQualifiers,", "\t\t\t\t\t\tTabInfoImpl ti,", "\t\t\t\t\t\tTupleDescriptor parentTupleDescriptor,", "\t\t\t\t\t\tList list,", "\t\t\t\t\t\tboolean forUpdate,", "\t\t\t\t\t\tTransactionController tc)", "\t\t\tthrows StandardException", "\t{", "\t\tif (tc == null) {", "\t\t\ttc = getTransactionCompile();", "\t\t}", "", "\t\treturn getDescriptorViaIndexMinion(indexId,", "\t\t\t\t\t\t\t\t\t\t keyRow,", "\t\t\t\t\t\t\t\t\t\t scanQualifiers,", "\t\t\t\t\t\t\t\t\t\t ti,", "\t\t\t\t\t\t\t\t\t\t parentTupleDescriptor,", "\t\t\t\t\t\t\t\t\t\t list,", "\t\t\t\t\t\t\t\t\t\t forUpdate,", "\t\t\t\t\t\t\t\t\t\t tc);", "\t}", "", "", "\tprivate final TupleDescriptor getDescriptorViaIndexMinion(", "\t\t\t\t\t\tint indexId,", "\t\t\t\t\t\tExecIndexRow keyRow,", "\t\t\t\t\t\tScanQualifier [][] scanQualifiers,", "\t\t\t\t\t\tTabInfoImpl ti,", "\t\t\t\t\t\tTupleDescriptor parentTupleDescriptor,", "\t\t\t\t\t\tList list,", "\t\t\t\t\t\tboolean forUpdate,", "\t\t\t\t\t\tTransactionController tc)", "\t\t\tthrows StandardException" ], "header": "@@ -8059,6 +8061,76 @@ public final class\tDataDictionaryImpl", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/DDLConstantAction.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.reference.Property;", "import org.apache.derby.iapi.services.property.PropertyUtil;", "import org.apache.derby.iapi.services.sanity.SanityManager;" ], "header": "@@ -27,6 +27,9 @@ import java.util.List;", "removed": [] }, { "added": [ "\t * @param activation activation", "\t * @param schemaName name of the schema" ], "header": "@@ -79,8 +82,8 @@ abstract class DDLConstantAction implements ConstantAction", "removed": [ "\t @param activation activation", "\t @param schemaName name of the schema" ] } ] } ]
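The DERBY-48 record above describes running the implicit CREATE SCHEMA in a nested transaction so its write locks on SYSSCHEMAS can be released immediately, falling back to the user transaction when nesting is unavailable or a lock timeout (possibly a self-lock) occurs. A hedged sketch of that pattern; Txn, TxnAction and LockTimeoutException are made-up stand-ins for Derby's TransactionController machinery, not its real API:

```java
// Hypothetical transaction abstraction used only for this sketch.
interface Txn { Txn startNested(); void commit(); void destroy(); }
interface TxnAction { void run(Txn tc) throws LockTimeoutException; }
class LockTimeoutException extends Exception {}

class NestedTxnRunner {
    static void runPreferNested(Txn outer, TxnAction action)
            throws LockTimeoutException {
        Txn nested;
        try {
            nested = outer.startNested();
        } catch (RuntimeException e) {
            nested = null;                 // nesting unavailable in this sketch
        }
        if (nested == null) {
            action.run(outer);             // just run in the caller's transaction
            return;
        }
        try {
            action.run(nested);
            nested.commit();               // commit at once, releasing locks early
        } catch (LockTimeoutException lte) {
            // May be a self-lock against locks the outer transaction already
            // holds, so retry once in the outer transaction.
            action.run(outer);
        } finally {
            nested.destroy();
        }
    }
}
```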
derby-DERBY-48-9e04f690
DERBY-48 A connection request that has a default schema that is being created by another transaction will fail to connect Patch derby-48b, which limits the use of a subtransaction to the initial default schema; other implicit schema creation remains transactional. A new test case is added to show the latter. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@685141 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-48-ddae94a2
DERBY-48 A connection request that has a default schema that is being created by another transaction will fail to connect Follow-up patch to svn 685141, which accidentally committed the wrong patch revision. With this patch, the net effect is DERBY-48b-1 which should have been committed. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@685232 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/DDLConstantAction.java", "hunks": [ { "added": [ "\t\t\tCreateSchemaConstantAction csca", "\t\t\t\t= new CreateSchemaConstantAction(schemaName, (String) null);", "\t\t\t\t// DERBY-48: This operation creates the user's initial", "\t\t\t\t// default schema and we don't want to hold a lock for", "\t\t\t\t// SYSSCHEMAS for the duration of the user transaction", "\t\t\t\t// since connection attempts may block, so we perform", "\t\t\t\t// the creation in a nested transaction (if possible)", "\t\t\t\t// so we can commit at once and release locks.", "\t\t\t\texecuteCAPreferSubTrans(csca, tc, activation);", "\t\t\t\t// create the schema in the user transaction" ], "header": "@@ -101,88 +101,20 @@ abstract class DDLConstantAction implements ConstantAction", "removed": [ " CreateSchemaConstantAction csca", " = new CreateSchemaConstantAction(schemaName, (String) null);", "\t\t\t\t// DERBY-48: This operation creates the user's initial default", "\t\t\t\t// schema and we don't want to hold a lock for SYSSCHEMAS for", "\t\t\t\t// the duration of the user transaction, so we perform the", "\t\t\t\t// creation in a nested transaction if possible.", "\t\t\t\tTransactionController useTc = null;", "\t\t\t\tTransactionController nestedTc = null;", "", "\t\t\t\ttry {", "\t\t\t\t\tnestedTc = tc.startNestedUserTransaction(false);", "\t\t\t\t\tuseTc = nestedTc;", "\t\t\t\t} catch (StandardException e) {", "\t\t\t\t\tif (SanityManager.DEBUG) {", "\t\t\t\t\t\tSanityManager.THROWASSERT(", "\t\t\t\t\t\t\t\"Unexpected: not able to start nested transaction \" +", "\t\t\t\t\t\t\t\"to auto-create schema\", e);", "\t\t\t\t\t}", "\t\t\t\t\tuseTc = tc;", "\t\t\t\t}", "", "\t\t\t\t// Try max twice: if nested transaction times out, try", "\t\t\t\t// again in the outer transaction because it may be a", "\t\t\t\t// self-lock, that is, the outer transaction may hold some", "\t\t\t\t// lock(s) that make the nested transaction attempt to set", "\t\t\t\t// a write lock time out. Trying it again in the outer", "\t\t\t\t// transaction will then succeed. If the reason is some", "\t\t\t\t// other transaction barring us, trying again in the outer", "\t\t\t\t// transaction will possibly time out again.", "\t\t\t\t//", "\t\t\t\t// Also, if creating a nested transaction failed, only try", "\t\t\t\t// once in the outer transaction.", "\t\t\t\twhile (true) {", "\t\t\t\t\ttry {", "\t\t\t\t\t\tcsca.executeConstantAction(activation, useTc);", "\t\t\t\t\t} catch (StandardException se) {", "\t\t\t\t\t\tif (se.getMessageId().equals(SQLState.LOCK_TIMEOUT)) {", "\t\t\t\t\t\t\t// We don't test for SQLState.DEADLOCK or", "\t\t\t\t\t\t\t// .LOCK_TIMEOUT_LOG here because a) if it is a", "\t\t\t\t\t\t\t// deadlock, it may be better to expose it, and b)", "\t\t\t\t\t\t\t// LOCK_TIMEOUT_LOG happens when the app has set", "\t\t\t\t\t\t\t// derby.locks.deadlockTrace=true, in which case we", "\t\t\t\t\t\t\t// don't want to mask the timeout. So in both the", "\t\t\t\t\t\t\t// latter cases we just throw.", "\t\t\t\t\t\t\tif (useTc == nestedTc) {", "", "\t\t\t\t\t\t\t\t// clean up after use of nested transaction,", "\t\t\t\t\t\t\t\t// then try again in outer transaction", "\t\t\t\t\t\t\t\tuseTc = tc;", "\t\t\t\t\t\t\t\tnestedTc.destroy();", "\t\t\t\t\t\t\t\tcontinue;", "\t\t\t\t\t\t\t}", "\t\t\t\t\t\t} else if (se.getMessageId()", "\t\t\t\t\t\t\t\t\t .equals(SQLState.LANG_OBJECT_ALREADY_EXISTS)) {", "\t\t\t\t\t\t\t// Ignore \"Schema already exists\". 
Another thread has", "\t\t\t\t\t\t\t// probably created it after we checked for it", "\t\t\t\t\t\t\tbreak;", "\t\t\t\t\t\t}", "", "\t\t\t\t\t\t// We got an non-expected exception, either in", "\t\t\t\t\t\t// the nested transaction or in the outer", "\t\t\t\t\t\t// transaction; we had better pass that on", "\t\t\t\t\t\tif (useTc == nestedTc) {", "\t\t\t\t\t\t\tnestedTc.destroy();", "\t\t\t\t\t\t}", "", "\t\t\t\t\t\tthrow se;", "\t\t\t\t\t}", "\t\t\t\t\tbreak;", "\t\t\t\t}", "", "\t\t\t\t// We either succeeded or got LANG_OBJECT_ALREADY_EXISTS.", "\t\t\t\t// Clean up if we did this in a nested transaction.", "\t\t\t\tif (useTc == nestedTc) {", "\t\t\t\t\tnestedTc.commit();", "\t\t\t\t\tnestedTc.destroy();", "\t\t\t\t}", "\t\t\t\t// create the schema in the user transaction always" ] } ] } ]
derby-DERBY-4803-30dd4c58
DERBY-4803: Make it possible to use sequences in INSERT...SELECT statements. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@999908 13f79535-47bb-0310-9956-ffa450edef68
[]
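A hedged illustration of the feature DERBY-4803 enables: drawing sequence values inside the SELECT that feeds an INSERT. The table and sequence names (src, dest, id_seq) and the in-memory JDBC URL are made up for the example, and derby.jar (the embedded driver) is assumed to be on the classpath:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class SequenceInsertSelect {
    public static void main(String[] args) throws Exception {
        Connection conn =
            DriverManager.getConnection("jdbc:derby:memory:demo;create=true");
        Statement s = conn.createStatement();
        s.execute("CREATE SEQUENCE id_seq");
        s.execute("CREATE TABLE src(name VARCHAR(20))");
        s.execute("CREATE TABLE dest(id INT, name VARCHAR(20))");
        s.execute("INSERT INTO src VALUES ('a'), ('b')");
        // Each selected row draws a fresh value from the sequence.
        s.execute("INSERT INTO dest " +
                  "SELECT NEXT VALUE FOR id_seq, name FROM src");
        s.close();
        conn.close();
    }
}
```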
derby-DERBY-4804-0d6a26d8
DERBY-4804: Make database used in store.OSReadOnlyTest fully read-only Properly simulate read-only media. Since Java (before Java SE 6) cannot make a file writable again after it has been made read-only, a db.lck file with invalid contents was created and made read-only instead. Patch file: derby-4804-1a-test_change.diff (modified some comments) git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@998844 13f79535-47bb-0310-9956-ffa450edef68
[]
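A sketch of the approach described in the DERBY-4804 record above, assuming a hypothetical database directory: rather than toggling the whole database tree read-only (which could not be undone before Java SE 6), write a db.lck file with invalid contents and mark just that file read-only:

```java
import java.io.File;
import java.io.FileOutputStream;

public class MakeDbLockReadOnly {
    public static void main(String[] args) throws Exception {
        File dbDir = new File("testdb");          // hypothetical database dir
        dbDir.mkdirs();
        File lock = new File(dbDir, "db.lck");
        try (FileOutputStream out = new FileOutputStream(lock)) {
            // deliberately invalid lock-file contents
            out.write("not a real lock".getBytes("UTF-8"));
        }
        if (!lock.setReadOnly()) {
            throw new IllegalStateException("could not make db.lck read-only");
        }
    }
}
```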
derby-DERBY-4804-c7b35898
DERBY-4804: Make database used in store.OSReadOnlyTest fully read-only Follow-up patch moving file operation code into PrivilegedFileOpsForTests, since it is probably useful for other tests as well. Also note the new assert-method in BaseJDBCTestCase, used to assert that a directory is fully deleted. Patch file: derby-4804-2a-common_file_ops.diff git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@999796 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-4805-813aa38e
DERBY-4805 (Increase the length of the RDBNAM field in the DRDA implementation) This commit allows RDBNAM to be 1024 bytes, lifting the limit from the current 255 bytes. I have changed the existing boundary test case to test for the new limit, but the tests now use an in-memory db JDBC URL, since long on-disk URLs ran into problems because of OS/file-system limits on file name length. As for a newer client (version 10.11 and higher) using the new RDBNAM limit against an older server (10.10 and lower, which will not have the fix for DERBY-4805 and hence will not accept the longer RDBNAM), this will result in a protocol exception. This is because when the client makes the first connect request to the server, it has no way of knowing which server version it is talking to, and hence there is no way to catch the length violation on the client side at this point. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1565491 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/client/org/apache/derby/client/am/ClientDatabaseMetaData.java", "hunks": [ { "added": [ " /**", " * DERBY-4805(Increase the length of the RDBNAM field in the DRDA ", " * implementation) ", " * True if the server supports RDBNAM longer than 255 character", " */", " private boolean supportsLongRDBNAM_;", "" ], "header": "@@ -101,6 +101,13 @@ public abstract class ClientDatabaseMetaData implements DatabaseMetaData {", "removed": [] }, { "added": [ "", " supportsLongRDBNAM_ =", " productLevel_.greaterThanOrEqualTo(10, 11, 0);" ], "header": "@@ -2372,6 +2379,9 @@ public abstract class ClientDatabaseMetaData implements DatabaseMetaData {", "removed": [] } ] }, { "file": "java/client/org/apache/derby/client/net/NetConnectionReply.java", "hunks": [ { "added": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -2665,7 +2665,7 @@ class NetConnectionReply extends Reply", "removed": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] }, { "added": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -2688,7 +2688,7 @@ class NetConnectionReply extends Reply", "removed": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] }, { "added": [ " // SQLCNRDB; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -3067,7 +3067,7 @@ class NetConnectionReply extends Reply", "removed": [ " // SQLCNRDB; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetCursor.java", "hunks": [ { "added": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -664,7 +664,7 @@ class NetCursor extends Cursor {", "removed": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] }, { "added": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -684,7 +684,7 @@ class NetCursor extends Cursor {", "removed": [ " // SQLRDBNAME; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] } ] }, { "file": "java/client/org/apache/derby/client/net/NetStatementReply.java", "hunks": [ { "added": [ " int maxDDMlength;", " //For SQLAM level 7, this was harcoded to be 781 in 10.10 codeline. But", " // after DERBY-4805 is fixed in Derby 10.11, we allow 1024 bytes for", " // RDBNAM rather than just 255 characters. Because of this, the ", " // DDM length in Derby 10.11 can be higher than 781. To be precise,", " // it is 781-255+1024=1550. The following if statement is doing this", " // calculation using constant identifiers rather than constant values", " if (netAgent_.netConnection_.databaseMetaData_.serverSupportLongRDBNAM()) {", " \tmaxDDMlength = 781-NetConfiguration.PKG_IDENTIFIER_MAX_LEN+", " \t\t\tNetConfiguration.RDBNAM_MAX_LEN;", " } else", " \tmaxDDMlength = 781;", "" ], "header": "@@ -1587,6 +1587,19 @@ class NetStatementReply extends NetPackageReply", "removed": [] }, { "added": [ " } else if ((ddmLength >= 71) && (ddmLength <= maxDDMlength)) {" ], "header": "@@ -1600,7 +1613,7 @@ class NetStatementReply extends NetPackageReply", "removed": [ " } else if ((ddmLength >= 71) && (ddmLength <= 781)) {" ] }, { "added": [ " int maxRDBlength =", " ((netAgent_.netConnection_.databaseMetaData_.serverSupportLongRDBNAM())? 
", " NetConfiguration.RDBNAM_MAX_LEN ", " : NetConfiguration.PKG_IDENTIFIER_MAX_LEN);", " if (scldtaLen < NetConfiguration.PKG_IDENTIFIER_FIXED_LEN || ", " \t\tscldtaLen > maxRDBlength) {" ], "header": "@@ -1608,7 +1621,12 @@ class NetStatementReply extends NetPackageReply", "removed": [ " if (scldtaLen < 18 || scldtaLen > 255) {" ] }, { "added": [ " // SQLXRDBNAM; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -2110,7 +2128,7 @@ class NetStatementReply extends NetPackageReply", "removed": [ " // SQLXRDBNAM; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] }, { "added": [ " // SQLXRDBNAM; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -2138,7 +2156,7 @@ class NetStatementReply extends NetPackageReply", "removed": [ " // SQLXRDBNAM; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] }, { "added": [ " // SQLDRDBNAM; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -2206,7 +2224,7 @@ class NetStatementReply extends NetPackageReply", "removed": [ " // SQLDRDBNAM; PROTOCOL TYPE VCS; ENVLID 0x32; Length Override 255" ] } ] }, { "file": "java/drda/org/apache/derby/impl/drda/DRDAConnThread.java", "hunks": [ { "added": [ " // but Derby 10.11 allows 1024", " rdbName.length > CodePoint.RDBNAM_MAX_NAME) {" ], "header": "@@ -3011,8 +3011,9 @@ class DRDAConnThread extends Thread {", "removed": [ " rdbName.length > CodePoint.MAX_NAME) {" ] }, { "added": [ " if (length < CodePoint.RDBNAM_LEN || length > CodePoint.RDBNAM_MAX_NAME) {" ], "header": "@@ -5732,7 +5733,7 @@ class DRDAConnThread extends Thread {", "removed": [ " if (length < CodePoint.RDBNAM_LEN || length > CodePoint.MAX_NAME) {" ] }, { "added": [ " * SQLRDBNAME; DRDA TYPE VCS; ENVLID 0x32; Length Override 1024" ], "header": "@@ -6604,7 +6605,7 @@ class DRDAConnThread extends Thread {", "removed": [ " * SQLRDBNAME; DRDA TYPE VCS; ENVLID 0x32; Length Override 255" ] } ] } ]
derby-DERBY-4806-7a7a289e
DERBY-4806, DERBY-4597: removes references to the IBM JCC driver in Main.java, and fixes extraction of derbyTesting.jar information when it is not in the same directory as derbyrun.jar. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1002682 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/tools/org/apache/derby/impl/tools/sysinfo/Main.java", "hunks": [ { "added": [ "" ], "header": "@@ -139,9 +139,9 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ "" ] }, { "added": [ " private static final String USAGESTRINGPARTA = MAINUSAGESTRING + \" [ [ \"", " + EMBEDDED + \" ][ \" + NET + \" ][ \" + CLIENT + \"] [ \" + TOOLS", " + \" ] [\";" ], "header": "@@ -512,13 +512,14 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ "\tprivate static final String DB2DRIVER = \"db2driver\";", "\tprivate static final String USAGESTRINGPARTA = MAINUSAGESTRING + \" [ [ \" + EMBEDDED + \" ][ \" + NET + \" ][ \" + CLIENT + \"] [ \" + DB2DRIVER + \" ] [ \" + TOOLS + \" ] [ \";" ] }, { "added": [], "header": "@@ -566,7 +567,6 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ "\t\t tryDB2DriverClasspath(successes, failures);" ] }, { "added": [], "header": "@@ -602,10 +602,6 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ "\t\tif (argumentsContain(args,DB2DRIVER)) {", "\t\t\ttryDB2DriverClasspath(successes, failures);", "\t\t\tseenArg =true;", "\t\t}" ] }, { "added": [], "header": "@@ -650,13 +646,6 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ " private static void tryDB2DriverClasspath(StringBuffer successes,", " StringBuffer failures)", " {", " tryMyClasspath(\"com.ibm.db2.jcc.DB2Driver\",", " Main.getTextMessage(\"SIF08.L\", \"db2jcc.jar\"),", " successes, failures);", " }" ] }, { "added": [ " * Check inside a jar file for the presence of a Derby info properties file.", " * ", " * @param filename", " * the jar file to check", " * @return ZipInfoProperties with the jar file set as the location or null", " * if not found." ], "header": "@@ -1021,14 +1010,12 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ " * Check inside a jar file for the presence of a Derby info properties", " * file. There is a special case for db2jcc, which does not have a Derby", " * info propeties file. If db2jcc is in the filename, acquire DB2Driver", " * via reflection and get the version number from it.", " *", " * @param filename the jar file to check", " * @return ZipInfoProperties with the jar file set as the location", " * or null if not found." ] }, { "added": [ "", " try {", " // DERBY-4806 Should use UTF-8 according to", " // http://www.w3.org/TR/html40/appendix/notes.html#non-ascii-chars", " // to get the string of the file name", " return URLDecoder.decode(result.toString(), \"UTF-8\");", " } catch (UnsupportedEncodingException e) {", " // All JVMs are required to support UTF-8.", " return e.getMessage();", " }" ], "header": "@@ -1126,8 +1113,16 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ " ", " return formatURL(result);" ] }, { "added": [ " result = e.getMessage();" ], "header": "@@ -1228,7 +1223,7 @@ public static void getMainInfo (java.io.PrintWriter aw, boolean pause) {", "removed": [ " result = \"IOException\";" ] } ] } ]
derby-DERBY-4808-ce433402
DERBY-4808; write a test that checks that optional packages are in place and clearly exposes when this is not the case. Adds some more checks to EnvTest and modifies the output mechanism. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@1603557 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-481-15b837ec
DERBY-481: Cleanup based on comments from Dag and Knut. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@719760 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/DMLModStatementNode.java", "hunks": [ { "added": [ " *", " * @param targetRCL the row in the table being INSERTed or UPDATEd", " * @param forUpdate true if this is an UPDATE. false otherwise.", " * @param addedGeneratedColumns generated columns which the compiler added earlier on" ], "header": "@@ -415,6 +415,10 @@ abstract class DMLModStatementNode extends DMLStatementNode", "removed": [] }, { "added": [ " *", " * @param dataDictionary metadata", " * @param targetTableDescriptor metadata for the table that has the generated columns", " * @param sourceRCL the tuple stream which drives the INSERT or UPDATE", " * @param targetRCL the row in the table that's being INSERTed or UPDATEd", " * @param forUpdate true if this is an UPDATE. false otherwise.", " * @param updateResultSet more information on the tuple stream driving the UPDATE" ], "header": "@@ -485,6 +489,13 @@ abstract class DMLModStatementNode extends DMLStatementNode", "removed": [] }, { "added": [ " *", " * @param rcl describes the row of expressions to be put into the bas table", " * @param resultSetNumber index of base table into array of ResultSets", " * @param ecb code generation state variable", " * @param mb the method being generated" ], "header": "@@ -1568,6 +1579,11 @@ abstract class DMLModStatementNode extends DMLStatementNode", "removed": [] }, { "added": [], "header": "@@ -1586,9 +1602,6 @@ abstract class DMLModStatementNode extends DMLStatementNode", "removed": [ "\t\t // generate statements of the form", "\t\t\t// fieldX.setColumn(columnNumber, (DataValueDescriptor) columnExpr);", "\t\t\t// and add them to exprFun." ] }, { "added": [ " break;" ], "header": "@@ -1599,7 +1612,7 @@ abstract class DMLModStatementNode extends DMLStatementNode", "removed": [ " continue;" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/TableElementList.java", "hunks": [ { "added": [ "\t\t\t// bind the generation clause", " ProviderList prevAPL = cc.getCurrentAuxiliaryProviderList();" ], "header": "@@ -751,9 +751,9 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ "\t\t\t// bind the check condition", "\t\t\t// verify that it evaluates to a boolean" ] }, { "added": [], "header": "@@ -766,7 +766,6 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ "\t\t\t\tProviderList prevAPL = cc.getCurrentAuxiliaryProviderList();" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/UpdateNode.java", "hunks": [ { "added": [], "header": "@@ -39,7 +39,6 @@ import org.apache.derby.iapi.services.compiler.MethodBuilder;", "removed": [ "import org.apache.derby.iapi.sql.compile.C_NodeTypes;" ] } ] } ]
derby-DERBY-481-291f9137
DERBY-481: Add tests for altering the datatype of generated columns and of the columns they depend on. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@711663 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-481-2a0827ac
DERBY-481: Forbid generation clauses which reference generated columns. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@709219 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/CreateTableNode.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.io.FormatableBitSet;" ], "header": "@@ -25,6 +25,7 @@ import org.apache.derby.iapi.reference.Property;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/TableElementList.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.services.io.FormatableBitSet;" ], "header": "@@ -21,6 +21,7 @@", "removed": [] }, { "added": [ "\t * @param generatedColumns Bitmap of generated columns in the table. Vacuous for CREATE TABLE, but may be non-trivial for ALTER TABLE. This routine may set bits for new generated columns.", "\tvoid bindAndValidateGenerationClauses(FromList fromList, FormatableBitSet generatedColumns )", " int columnCount = table.getResultColumns().size();", " generatedColumns.grow( columnCount + 1 );", " " ], "header": "@@ -656,16 +657,20 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ "\tvoid bindAndValidateGenerationClauses(FromList fromList)" ] }, { "added": [ " generationClauseNode = cdn.getGenerationClauseNode();" ], "header": "@@ -689,7 +694,7 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ "\t\t generationClauseNode = cdn.getGenerationClauseNode();" ] }, { "added": [ " int position = rcl.getPosition( cdn.getColumnName(), 1 );", " generatedColumns.set( position );", " " ], "header": "@@ -763,6 +768,9 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [] } ] } ]
derby-DERBY-481-3c09be76
DERBY-481: Prevent users from dropping a function mentioned in a generation clause. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@712840 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/compile/TableElementList.java", "hunks": [ { "added": [ "\tpublic int genColumnInfos( ColumnInfo[] colInfos)", " throws StandardException" ], "header": "@@ -430,7 +430,8 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ "\tpublic int genColumnInfos(ColumnInfo[] colInfos)" ] }, { "added": [ "\t\t\t\t\t\t\t\tnull, null, null, null, null, null," ], "header": "@@ -441,7 +442,7 @@ public class TableElementList extends QueryTreeNodeVector", "removed": [ "\t\t\t\t\t\t\t\tnull, null, null, null, null," ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/ColumnInfo.java", "hunks": [ { "added": [ "import org.apache.derby.iapi.sql.depend.ProviderInfo;", "import org.apache.derby.iapi.services.io.FormatableArrayHolder;" ], "header": "@@ -29,12 +29,13 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [ "" ] }, { "added": [ " public ProviderInfo[] providers;" ], "header": "@@ -70,6 +71,7 @@ public class ColumnInfo implements Formatable", "removed": [] }, { "added": [ "\t * @param providers Array of providers that this column depends on." ], "header": "@@ -107,6 +109,7 @@ public class ColumnInfo implements Formatable", "removed": [] }, { "added": [ "\t\t\t\t\t ProviderInfo[]\t\t\t\t\tproviders," ], "header": "@@ -120,6 +123,7 @@ public class ColumnInfo implements Formatable", "removed": [] }, { "added": [ " this.providers = providers;" ], "header": "@@ -131,6 +135,7 @@ public class ColumnInfo implements Formatable", "removed": [] }, { "added": [ "", " FormatableArrayHolder fah = (FormatableArrayHolder) fh.get( \"providers\" );", " if ( fah != null )", " {", " providers = (ProviderInfo[]) fah.getArray( ProviderInfo.class );", " }" ], "header": "@@ -173,6 +178,12 @@ public class ColumnInfo implements Formatable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/CreateTableConstantAction.java", "hunks": [ { "added": [], "header": "@@ -22,7 +22,6 @@", "removed": [ "" ] }, { "added": [ "import org.apache.derby.catalog.DependableFinder;" ], "header": "@@ -49,6 +48,7 @@ import org.apache.derby.iapi.error.StandardException;", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/DDLConstantAction.java", "hunks": [ { "added": [ "import org.apache.derby.catalog.DependableFinder;", "import org.apache.derby.iapi.services.context.ContextManager;", "import org.apache.derby.iapi.sql.depend.Provider;", "import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;", "import org.apache.derby.iapi.sql.dictionary.DefaultDescriptor;", "import org.apache.derby.iapi.sql.dictionary.TableDescriptor;" ], "header": "@@ -24,18 +24,24 @@ package org.apache.derby.impl.sql.execute;", "removed": [] } ] } ]
derby-DERBY-481-45c66917
DERBY-481: Add tests to verify that you can't use updatable ResultSets to corrupt generated columns. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@719656 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-481-4e6df7ab
DERBY-481: Incorporate Dag's patch review feedback. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@722214 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/impl/sql/execute/NoRowsResultSetImpl.java", "hunks": [ { "added": [ " private int firstColumn = -1; // First column being stuffed. For UPDATES, this lies in the second half of the row.", " private int[] generatedColumnPositions; // 1-based positions of generated columns in the target row", "", " // One cell for each slot in generatedColumnPositions. These are temporary", " // values which hold the result of running the generation clause before we", " // stuff the result into the target row.", " private DataValueDescriptor[] normalizedGeneratedValues;" ], "header": "@@ -74,9 +74,13 @@ abstract class NoRowsResultSetImpl implements ResultSet", "removed": [ " private int firstColumn = -1;", " private int[] generatedColumnPositions; // 1-based positions", " private DataValueDescriptor[] normalizedGeneratedValues; // one for each slot in generatedColumnPositions" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/execute/NormalizeResultSet.java", "hunks": [ { "added": [ " *", " * @param isUpdate True if we are executing an UPDATE statement", " * @param desc Metadata describing a result row" ], "header": "@@ -272,6 +272,9 @@ class NormalizeResultSet extends NoPutResultSetImpl", "removed": [] }, { "added": [ "\t * Normalize a column. For now, this means calling constructors through", " *", " * @param dtd Data type to coerce to", " * @param sourceRow row holding the source column", " * @param sourceColumnPosition position of column in row", " * @param resultCol where to stuff the coerced value", " * @param desc Additional metadata for error reporting if necessary", " *" ], "header": "@@ -292,17 +295,20 @@ class NormalizeResultSet extends NoPutResultSetImpl", "removed": [ "\t * Normalize a row. For now, this means calling constructors through", "\t * In the future, this mechanism will be extended to do type conversions,", "\t * as well. I didn't implement type conversions yet because it looks", "\t * like a lot of work, and we needed char and varchar right away.", "\t *" ] } ] } ]
derby-DERBY-481-5a0cfa25
DERBY-481: Generation clauses may not invoke functions which run SQL. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@709415 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/iapi/sql/compile/CompilerContext.java", "hunks": [ { "added": [ "\tpublic static final int\t\t\tSQL_IN_ROUTINES_ILLEGAL\t\t=\t0x00002000;" ], "header": "@@ -100,6 +100,7 @@ public interface CompilerContext extends Context", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/QueryTreeNode.java", "hunks": [ { "added": [ " throwReliabilityException( fragmentType, fragmentBitMask );" ], "header": "@@ -1502,7 +1502,7 @@ public abstract class QueryTreeNode implements Visitable", "removed": [ " throwReliabilityException( fragmentType );" ] }, { "added": [ " throwReliabilityException( fragmentTypeTxt, fragmentBitMask );" ], "header": "@@ -1525,7 +1525,7 @@ public abstract class QueryTreeNode implements Visitable", "removed": [ " throwReliabilityException( fragmentTypeTxt );" ] }, { "added": [ " private void throwReliabilityException( String fragmentType, int fragmentBitMask ) throws StandardException" ], "header": "@@ -1535,7 +1535,7 @@ public abstract class QueryTreeNode implements Visitable", "removed": [ " private void throwReliabilityException( String fragmentType ) throws StandardException" ] } ] }, { "file": "java/engine/org/apache/derby/impl/sql/compile/StaticMethodCallNode.java", "hunks": [ { "added": [ " if ( permitsSQL( routineInfo ) )", " {", " checkReliability( getMethodName(), CompilerContext.SQL_IN_ROUTINES_ILLEGAL );", " }" ], "header": "@@ -221,6 +221,10 @@ public class StaticMethodCallNode extends MethodCallNode", "removed": [] } ] } ]
derby-DERBY-481-7101c06c
DERBY-481: Add negative test cases for generation clauses which mention CURRENT SCHEMA and CURRENT SQLID. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@714188 13f79535-47bb-0310-9956-ffa450edef68
[]
derby-DERBY-481-9188c1cc
DERBY-481: Catalog changes supporting generated columns. git-svn-id: https://svn.apache.org/repos/asf/db/derby/code/trunk@707414 13f79535-47bb-0310-9956-ffa450edef68
[ { "file": "java/engine/org/apache/derby/catalog/DefaultInfo.java", "hunks": [ { "added": [ "\t/**", "\t * If this default is a generation clause, then return the 1-based ids of", "\t * other columns in the row which the generation clause references.", "\t */", "\tpublic int[] getReferencedColumnIDs();", "\t" ], "header": "@@ -33,6 +33,12 @@ public interface DefaultInfo", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/catalog/types/DefaultInfoImpl.java", "hunks": [ { "added": [ " private int[] referencedColumnIDs;", "\tfinal private static int BITS_MASK_IS_GENERATED_COLUMN = 0x2;" ], "header": "@@ -55,8 +55,10 @@ public class DefaultInfoImpl implements DefaultInfo, Formatable", "removed": [] }, { "added": [ "\t/**", "\t * Constructor for use with generated columns", "\t */", "\tpublic DefaultInfoImpl", " (", " String defaultText,", " int[] referencedColumnIDs", " )", "\t{", " if ( referencedColumnIDs == null ) { referencedColumnIDs = new int[0]; }", " ", "\t\tthis.type = BITS_MASK_IS_GENERATED_COLUMN;", "\t\tthis.defaultText = defaultText;", "\t\tthis.referencedColumnIDs = referencedColumnIDs;", "\t}", "" ], "header": "@@ -78,6 +80,22 @@ public class DefaultInfoImpl implements DefaultInfo, Formatable", "removed": [] }, { "added": [ "\t/**", "\t * @see DefaultInfo#getReferencedColumnIDs", "\t */", "\tpublic int[] getReferencedColumnIDs()", "\t{", "\t\treturn referencedColumnIDs;", "\t}", "" ], "header": "@@ -86,6 +104,14 @@ public class DefaultInfoImpl implements DefaultInfo, Formatable", "removed": [] }, { "added": [ "", " if ( isGeneratedColumn() )", " {", " int count = in.readInt();", " referencedColumnIDs = new int[ count ];", " for ( int i = 0; i < count; i++ ) { referencedColumnIDs[ i ] = in.readInt(); }", " }" ], "header": "@@ -110,6 +136,13 @@ public class DefaultInfoImpl implements DefaultInfo, Formatable", "removed": [] }, { "added": [ " ", " if ( isGeneratedColumn() )", " {", " int count = referencedColumnIDs.length;", " out.writeInt( count );", " for ( int i = 0; i < count; i++ ) { out.writeInt( referencedColumnIDs[ i ] ); }", " }" ], "header": "@@ -125,6 +158,13 @@ public class DefaultInfoImpl implements DefaultInfo, Formatable", "removed": [] } ] }, { "file": "java/engine/org/apache/derby/iapi/sql/dictionary/TableDescriptor.java", "hunks": [ { "added": [ "\t/**", "\t * Gets the list of columns defined by generation clauses.", "\t */", "\tpublic ColumnDescriptorList getGeneratedColumns()", "\t{", " ColumnDescriptorList fullList = getColumnDescriptorList();", " ColumnDescriptorList result = new ColumnDescriptorList();", " int count = fullList.size();", "", " for ( int i = 0; i < count; i++ )", " {", " ColumnDescriptor cd = fullList.elementAt( i );", " if ( cd.hasGenerationClause() ) { result.add( oid, cd ); }", " }", " ", "\t\treturn result;", "\t}", "" ], "header": "@@ -888,6 +888,24 @@ public class TableDescriptor extends TupleDescriptor", "removed": [] } ] } ]